From 60313512fb163ced5c5ea4249dff57174bc1d06b Mon Sep 17 00:00:00 2001 From: WangYi Date: Fri, 20 May 2022 09:00:29 +0800 Subject: [PATCH 01/50] add tensor_functions --- .../api/python/framework/tensor_functions.cpp | 338 ++++++++++++++++++ .../api/python/framework/tensor_functions.h | 31 ++ 2 files changed, 369 insertions(+) create mode 100644 oneflow/api/python/framework/tensor_functions.cpp create mode 100644 oneflow/api/python/framework/tensor_functions.h diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp new file mode 100644 index 00000000000..b3cc32858b5 --- /dev/null +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -0,0 +1,338 @@ +/* +Copyright 2020 The OneFlow Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +#include +#include +#include "oneflow/api/python/exception/exception.h" +#include "oneflow/api/python/framework/size.h" +#include "oneflow/api/python/framework/tensor.h" +#include "oneflow/api/python/functional/common.h" +#include "oneflow/api/python/functional/python_arg.h" +#include "oneflow/api/python/functional/functional_api.yaml.pybind.h" +#include "oneflow/api/python/functional/tensor_api.yaml.pybind.h" +#include "oneflow/api/python/of_api_registry.h" +#include "oneflow/api/python/ofblob/ofblob.e.h" +#include "oneflow/api/python/utils/tensor_utils.h" +#include "oneflow/core/autograd/autograd_engine.h" +#include "oneflow/core/framework/tensor.h" +#include "oneflow/core/framework/tensor_rpc_util.h" +#include "oneflow/core/framework/device.h" +#include "oneflow/core/common/stride.h" +#include "oneflow/core/framework/dtype.h" +#include "oneflow/core/framework/placement_utils.h" +#include "oneflow/core/functional/functional.h" +#include "oneflow/core/functional/tensor_index.h" + +namespace oneflow { +namespace one { + +#define ASSERT(x) (x).GetOrThrow() +#define ASSERT_PTR(x) (x).GetPtrOrThrow() + +#define NB_UNARY_FUNC(func_name, bind_func, name) \ + static PyObject* func_name(PyObject* self) { \ + HANDLE_ERRORS \ + PyObject* tuple = PyTuple_Pack(1, self); \ + std::cout << "cpython " << name << std::endl; \ + auto* result = bind_func(NULL, tuple, NULL); \ + if (PyErr_Occurred()) { throw py::error_already_set(); } \ + return result; \ + END_HANDLE_ERRORS \ + } + +#define NB_BINARY_FUNC(func_name, bind_func, name) \ + static PyObject* func_name(PyObject* a, PyObject* b) { \ + HANDLE_ERRORS \ + PyObject* tuple = PyTuple_Pack(2, a, b); \ + std::cout << "cpython " << name << std::endl; \ + auto* result = bind_func(NULL, tuple, NULL); \ + if (PyErr_Occurred()) { throw py::error_already_set(); } \ + return result; \ + END_HANDLE_ERRORS \ + } + +NB_BINARY_FUNC(PyTensorObject_add, functional::add, "add"); +NB_BINARY_FUNC(PyTensorObject_sub, functional::sub, "sub"); +NB_BINARY_FUNC(PyTensorObject_mul, functional::mul, "mul"); +NB_BINARY_FUNC(PyTensorObject_fmod, functional::fmod, "fmod"); +NB_BINARY_FUNC(PyTensorObject_div, functional::div, "div"); +PyObject* PyTensorObject_pow(PyObject* a, PyObject* 
b, PyObject* unused) {
+  HANDLE_ERRORS
+  PyObject* tuple = PyTuple_Pack(2, a, b);
+  auto* result = functional::pow(NULL, tuple, NULL);
+  if (PyErr_Occurred()) { throw py::error_already_set(); }
+  // std::cout << "using cpython pow" << std::endl;
+  return result;
+  END_HANDLE_ERRORS
+}
+NB_UNARY_FUNC(PyTensorObject_negative, functional::negative, "negative");
+// NB_UNARY_FUNC(PyTensorObject_positive, functional::positive);
+NB_UNARY_FUNC(PyTensorObject_absolute, functional::abs, "abs");
+
+static PyObject* PyTensorObject_invert(PyObject* self) {
+  HANDLE_ERRORS
+  CHECK_OR_THROW(PyTensor_Unpack(self)->dtype()->data_type() == DataType::kBool)
+      << "~ (operator.invert) is only implemented on integer and Boolean-type tensors";
+  PyObject* tuple = PyTuple_Pack(1, self);
+  auto* result = functional::logical_not(NULL, tuple, NULL);
+  if (PyErr_Occurred()) { throw py::error_already_set(); }
+  std::cout << "using cpython invert" << std::endl;
+  return result;
+  END_HANDLE_ERRORS
+}
+
+NB_BINARY_FUNC(PyTensorObject_and, functional::logical_and, "logical_and");
+NB_BINARY_FUNC(PyTensorObject_xor, functional::logical_xor, "logical_xor");
+NB_BINARY_FUNC(PyTensorObject_or, functional::logical_or, "logical or");
+
+#define INPLACE_BINARY_FUNC(func_name, bind_func, name)                  \
+  static PyObject* func_name(PyObject* a, PyObject* b) {                 \
+    HANDLE_ERRORS                                                        \
+    PyObject* tuple = PyTuple_Pack(2, a, b);                             \
+    PyObject* dict = PyDict_New();                                       \
+    CHECK_OR_THROW(PyDict_SetItemString(dict, "inplace", Py_True) > -1); \
+    const auto& result = bind_func(NULL, tuple, dict);                   \
+    if (PyErr_Occurred()) { throw py::error_already_set(); }             \
+    return result;                                                       \
+    END_HANDLE_ERRORS                                                    \
+  }
+
+INPLACE_BINARY_FUNC(PyTensorObject_inplace_add, functional::add, "add");
+INPLACE_BINARY_FUNC(PyTensorObject_inplace_sub, functional::sub, "sub");
+INPLACE_BINARY_FUNC(PyTensorObject_inplace_mul, functional::mul, "mul");
+INPLACE_BINARY_FUNC(PyTensorObject_inplace_fmod, functional::fmod, "fmod");
+
+PyObject* PyTensorObject_inplace_pow(PyObject* a, PyObject* b, PyObject* unused) {
+  HANDLE_ERRORS
+  PyObject* tuple = PyTuple_Pack(2, a, b);
+  PyObject* dict = PyDict_New();
+  CHECK_OR_THROW(PyDict_SetItemString(dict, "inplace", Py_True) > -1);
+  auto* result = functional::pow(NULL, tuple, dict);
+  if (PyErr_Occurred()) { throw py::error_already_set(); }
+  // std::cout << "using cpython bind_func" << std::endl;
+  return result;
+  END_HANDLE_ERRORS
+}
+
+INPLACE_BINARY_FUNC(PyTensorObject_inplace_and, functional::logical_and, "logical_and");
+INPLACE_BINARY_FUNC(PyTensorObject_inplace_xor, functional::logical_xor, "logical_xor");
+INPLACE_BINARY_FUNC(PyTensorObject_inplace_or, functional::logical_or, "logical_or");
+NB_BINARY_FUNC(PyTensorObject_floor_div, functional::floor_divide, "floor divide");
+NB_BINARY_FUNC(PyTensorObject_true_div, functional::div, "true_div");
+INPLACE_BINARY_FUNC(PyTensorObject_inplace_floor_div, functional::floor_divide, "floor_divide");
+INPLACE_BINARY_FUNC(PyTensorObject_inplace_true_div, functional::div, "true_divide");
+NB_BINARY_FUNC(PyTensorObject_matrix_multiply, functional::matmul, "matmul");
+// INPLACE_BINARY_FUNC(PyTensorObject_inplace_matrix_multiply, functional::matmul, "matmul");
+
+PyNumberMethods PyTensorObject_as_number = {
+    PyTensorObject_add,       // nb_add, __add__
+    PyTensorObject_sub,       // nb_subtract, __sub__
+    PyTensorObject_mul,       // nb_multiply, __mul__
+    PyTensorObject_fmod,      // nb_remainder, __mod__, __rmod__
+    NULL,                     // nb_divmod
+    PyTensorObject_pow,       // nb_power
+    PyTensorObject_negative,  // nb_negative
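+    // slots that stay NULL fall back to CPython's default handling for the
+    // corresponding operator (typically a TypeError for unsupported operands)
+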
NULL, // nb_positive + PyTensorObject_absolute, // nb_absolute + NULL, // nb_bool torch doesn't implement + PyTensorObject_invert, // nb_invert + NULL, // nb_lshift + NULL, // nb_rshift + PyTensorObject_and, // nb_and + PyTensorObject_xor, // nb_xor + PyTensorObject_or, // nb_or + NULL, // nb_int + NULL, // nb_reserved + NULL, // nb_float + + NULL, // bug PyTensorObject_inplace_add, //nb_inplace_add + PyTensorObject_inplace_sub, // nb_inplace_sub + PyTensorObject_inplace_mul, // nb_inplace_mul + PyTensorObject_inplace_fmod, // nb_inplace_remainder + PyTensorObject_inplace_pow, // nb_inplace_pow + NULL, // nb_inplace_lshift + NULL, // nb_inplace_rshift + PyTensorObject_inplace_and, // nb_inplace_and + PyTensorObject_inplace_xor, // nb_inplace_xor + PyTensorObject_inplace_or, // nb_inplace_or + + PyTensorObject_floor_div, // nb_floor_div + PyTensorObject_true_div, // nb_true_div + PyTensorObject_inplace_floor_div, // nb_inplace_floor_div + PyTensorObject_inplace_true_div, // nb_inplace_true_div + + NULL, // nb_index + PyTensorObject_matrix_multiply, // nb_matrix_multiply + NULL, // not implemented yet nb_inplace_matrix_multiply + +}; + + +// extra methods + +static PyObject* PyTensorObject_byte(PyObject* self, PyObject* unused) { + HANDLE_ERRORS + // std::cout << "cpython byte" << std::endl; + return PyTensor_New(ASSERT_PTR(functional::To(PyTensor_Unpack(self), DType::Int8(), false))); + END_HANDLE_ERRORS +} + +static PyObject* PyTensorObject_dim(PyObject* self, PyObject* unused) { + HANDLE_ERRORS + // std::cout << "cpython dim" << std::endl; + return functional::CastToPyObject(PyTensor_Unpack(self)->ndim()); + END_HANDLE_ERRORS +} + +static PyObject* PyTensorObject_nelement(PyObject* self, PyObject* unused) { + HANDLE_ERRORS + // std::cout << "cpython nelement" << std::endl; + return functional::CastToPyObject(PyTensor_Unpack(self)->nelement()); + END_HANDLE_ERRORS +} + +static PyObject* PyTensorObject_element_size(PyObject* self, PyObject* unused) { + HANDLE_ERRORS + // std::cout << "cpython element_size" << std::endl; + return functional::CastToPyObject(PyTensor_Unpack(self)->dtype()->bytes()); + END_HANDLE_ERRORS +} + +static PyObject* PyTensorObject_get_device(PyObject* self, PyObject* unused) { + HANDLE_ERRORS + // std::cout << "cpython get_device" << std::endl; + auto device_type = ASSERT(PyTensor_Unpack(self)->device())->enum_type(); + CHECK_OR_THROW(device_type == DeviceType::kCUDA) + << "get_device is only available for GPU tensor."; + return functional::CastToPyObject(ASSERT(PyTensor_Unpack(self)->device())->device_id()); + END_HANDLE_ERRORS +} + +#define UNARY_METHOD(func_name, bind_func, name) \ + static PyObject* func_name(PyObject* self, PyObject* unused) { \ + HANDLE_ERRORS \ + return PyTensor_New(ASSERT_PTR(bind_func(PyTensor_Unpack(self)))); \ + END_HANDLE_ERRORS \ + } + +UNARY_METHOD(PyTensorObject_abs, functional::Abs, "abs"); +UNARY_METHOD(PyTensorObject_exp, functional::Exp, "exp"); +UNARY_METHOD(PyTensorObject_floor, functional::Floor, "floor"); +UNARY_METHOD(PyTensorObject_floor_, functional::Floor_, "floor_"); +UNARY_METHOD(PyTensorObject_sign, functional::Sign, "sign"); +UNARY_METHOD(PyTensorObject_gelu, functional::Gelu, "gelu"); +UNARY_METHOD(PyTensorObject_mish, functional::Mish, "mish"); +UNARY_METHOD(PyTensorObject_negative, functional::Negative, "negatinve"); +UNARY_METHOD(PyTensorObject_sigmoid, functional::Sigmoid, "sigmoid"); +UNARY_METHOD(PyTensorObject_silu, functional::Silu, "silu"); +UNARY_METHOD(PyTensorObject_selu, functional::Selu, "selu"); 
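+// each UNARY_METHOD invocation expands to a METH_NOARGS wrapper that unpacks
+// the tensor and forwards it to the named functional:: op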
+UNARY_METHOD(PyTensorObject_softsign, functional::SoftSign, "softsign"); +UNARY_METHOD(PyTensorObject_log1p, functional::Log1p, "log1p"); +UNARY_METHOD(PyTensorObject_log2, functional::Log2, "log2"); +UNARY_METHOD(PyTensorObject_reciprocal, functional::Reciprocal, "reciprocal"); +UNARY_METHOD(PyTensorObject_ceil, functional::Ceil, "ceil"); +UNARY_METHOD(PyTensorObject_erf, functional::Erf, "erf"); +UNARY_METHOD(PyTensorObject_erfc, functional::Erfc, "erfc"); +UNARY_METHOD(PyTensorObject_erfinv, functional::Erfinv, "erfinv"); +UNARY_METHOD(PyTensorObject_erfinv_, functional::ErfinvInplace, "erfinv_inplace"); +UNARY_METHOD(PyTensorObject_expm1, functional::Expm1, "expm1"); +UNARY_METHOD(PyTensorObject_log, functional::Log, "log"); +UNARY_METHOD(PyTensorObject_rsqrt, functional::Rsqrt, "rsqrt"); +UNARY_METHOD(PyTensorObject_sqrt, functional::Sqrt, "sqrt"); +UNARY_METHOD(PyTensorObject_square, functional::Square, "square"); +UNARY_METHOD(PyTensorObject_round, functional::Round, "round"); +UNARY_METHOD(PyTensorObject_t, functional::TransposeAllDimFunction, "t"); +UNARY_METHOD(PyTensorObject_isnan, functional::IsNan, "isnan"); +UNARY_METHOD(PyTensorObject_isinf, functional::IsInf, "isinf"); +UNARY_METHOD(PyTensorObject_sin, functional::Sin, "sin"); +UNARY_METHOD(PyTensorObject_sin_, functional::Sin_, "sin_"); +UNARY_METHOD(PyTensorObject_asin, functional::Asin, "asin"); +UNARY_METHOD(PyTensorObject_cos, functional::Cos, "cos"); +UNARY_METHOD(PyTensorObject_acos, functional::Acos, "acos"); +UNARY_METHOD(PyTensorObject_tan, functional::Tan, "Tan"); +UNARY_METHOD(PyTensorObject_atan, functional::Atan, "atan"); +UNARY_METHOD(PyTensorObject_sinh, functional::Sinh, "sinh"); +UNARY_METHOD(PyTensorObject_asinh, functional::Asinh, "asinh"); +UNARY_METHOD(PyTensorObject_cosh, functional::Cosh, "cosh"); +UNARY_METHOD(PyTensorObject_acosh, functional::Acosh, "acosh"); +UNARY_METHOD(PyTensorObject_tanh, functional::Tanh, "tanh"); +UNARY_METHOD(PyTensorObject_atanh, functional::Atanh, "atanh"); + +PyMethodDef PyTensorObject_extra_methods[] = { + {"byte", PyTensorObject_byte, METH_NOARGS, NULL}, + {"dim", PyTensorObject_dim, METH_NOARGS, NULL}, + {"ndimension", PyTensorObject_dim, METH_NOARGS, NULL}, + {"nelement", PyTensorObject_nelement, METH_NOARGS, NULL}, + {"numel", PyTensorObject_nelement, METH_NOARGS, NULL}, + {"element_size", PyTensorObject_element_size, METH_NOARGS, NULL}, + {"get_device", PyTensorObject_get_device, METH_NOARGS, NULL}, + {"abs", PyTensorObject_abs, METH_NOARGS, NULL}, + {"exp", PyTensorObject_exp, METH_NOARGS, NULL}, + {"floor", PyTensorObject_floor, METH_NOARGS, NULL}, + {"floor_", PyTensorObject_floor_, METH_NOARGS, NULL}, + {"acos", PyTensorObject_acos, METH_NOARGS, NULL}, + {"arccos", PyTensorObject_acos, METH_NOARGS, NULL}, + {"acosh", PyTensorObject_acosh, METH_NOARGS, NULL}, + {"arccosh", PyTensorObject_acosh, METH_NOARGS, NULL}, + {"atanh", PyTensorObject_atanh, METH_NOARGS, NULL}, + {"arctanh", PyTensorObject_atanh, METH_NOARGS, NULL}, + {"sign", PyTensorObject_sign, METH_NOARGS, NULL}, + {"sinh", PyTensorObject_sinh, METH_NOARGS, NULL}, + {"tan", PyTensorObject_tan, METH_NOARGS, NULL}, + {"gelu", PyTensorObject_gelu, METH_NOARGS, NULL}, + {"mish", PyTensorObject_mish, METH_NOARGS, NULL}, + {"negative", PyTensorObject_negative, METH_NOARGS, NULL}, + {"neg", PyTensorObject_negative, METH_NOARGS, NULL}, + {"sigmoid", PyTensorObject_sigmoid, METH_NOARGS, NULL}, + {"tanh", PyTensorObject_tanh, METH_NOARGS, NULL}, + {"silu", PyTensorObject_silu, METH_NOARGS, NULL}, + {"selu", 
PyTensorObject_selu, METH_NOARGS, NULL}, + {"softsign", PyTensorObject_softsign, METH_NOARGS, NULL}, + {"log1p", PyTensorObject_log1p, METH_NOARGS, NULL}, + {"log2", PyTensorObject_log2, METH_NOARGS, NULL}, + {"reciprocal", PyTensorObject_reciprocal, METH_NOARGS, NULL}, + {"asin", PyTensorObject_asin, METH_NOARGS, NULL}, + {"arcsin", PyTensorObject_asin, METH_NOARGS, NULL}, + {"asinh", PyTensorObject_asinh, METH_NOARGS, NULL}, + {"arcsinh", PyTensorObject_asinh, METH_NOARGS, NULL}, + {"atan", PyTensorObject_atan, METH_NOARGS, NULL}, + {"arctan", PyTensorObject_atan, METH_NOARGS, NULL}, + {"ceil", PyTensorObject_ceil, METH_NOARGS, NULL}, + {"cos", PyTensorObject_cos, METH_NOARGS, NULL}, + {"cosh", PyTensorObject_cosh, METH_NOARGS, NULL}, + {"erf", PyTensorObject_erf, METH_NOARGS, NULL}, + {"erfc", PyTensorObject_erfc, METH_NOARGS, NULL}, + {"erfinv", PyTensorObject_erfinv, METH_NOARGS, NULL}, + {"erfinv_", PyTensorObject_erfinv_, METH_NOARGS, NULL}, + {"expm1", PyTensorObject_expm1, METH_NOARGS, NULL}, + {"log", PyTensorObject_log, METH_NOARGS, NULL}, + {"rsqrt", PyTensorObject_rsqrt, METH_NOARGS, NULL}, + {"sqrt", PyTensorObject_sqrt, METH_NOARGS, NULL}, + {"square", PyTensorObject_square, METH_NOARGS, NULL}, + {"round", PyTensorObject_round, METH_NOARGS, NULL}, + {"t", PyTensorObject_t, METH_NOARGS, NULL}, + {"sin", PyTensorObject_sin, METH_NOARGS, NULL}, + {"sin_", PyTensorObject_sin_, METH_NOARGS, NULL}, + {"isnan", PyTensorObject_isnan, METH_NOARGS, NULL}, + {"isinf", PyTensorObject_isinf, METH_NOARGS, NULL}, +}; + + +} // namespace one +} // namespace oneflow + +#undef ASSERT +#undef ASSERT_PTR \ No newline at end of file diff --git a/oneflow/api/python/framework/tensor_functions.h b/oneflow/api/python/framework/tensor_functions.h new file mode 100644 index 00000000000..83acb92f10a --- /dev/null +++ b/oneflow/api/python/framework/tensor_functions.h @@ -0,0 +1,31 @@ +/* +Copyright 2020 The OneFlow Authors. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +#ifndef ONEFLOW_API_PYTHON_FRAMEWORK_TENSOR_FUNCTIONS_H +#define ONEFLOW_API_PYTHON_FRAMEWORK_TENSOR_FUNCTIONS_H + +#include + + +namespace oneflow { +namespace one { + +extern PyNumberMethods PyTensorObject_as_number; +extern PyMethodDef PyTensorObject_extra_methods[]; + +} // namespace one +} // namespace oneflow + +#endif // ONEFLOW_API_PYTHON_FRAMEWORK_TENSOR_FUNCTIONS_H_ From 8ed63874a59b2bdcf54e82ca7a51b9c13c41f9af Mon Sep 17 00:00:00 2001 From: WangYi Date: Mon, 23 May 2022 22:52:59 +0800 Subject: [PATCH 02/50] concat py methods --- oneflow/api/python/framework/tensor.cpp | 32 ++++- .../api/python/framework/tensor_functions.cpp | 35 ++++- .../api/python/framework/tensor_functions.h | 2 +- python/oneflow/framework/tensor.py | 129 +++--------------- 4 files changed, 82 insertions(+), 116 deletions(-) mode change 100644 => 100755 python/oneflow/framework/tensor.py diff --git a/oneflow/api/python/framework/tensor.cpp b/oneflow/api/python/framework/tensor.cpp index 59602cefdad..599ffabba38 100644 --- a/oneflow/api/python/framework/tensor.cpp +++ b/oneflow/api/python/framework/tensor.cpp @@ -15,6 +15,7 @@ limitations under the License. */ #include "oneflow/api/python/framework/tensor.h" +#include #include #include #include "oneflow/api/python/exception/exception.h" @@ -35,6 +36,7 @@ limitations under the License. #include "oneflow/core/framework/placement_utils.h" #include "oneflow/core/functional/functional.h" #include "oneflow/core/functional/tensor_index.h" +#include "oneflow/api/python/framework/tensor_functions.h" namespace py = pybind11; @@ -320,7 +322,23 @@ static PyObject* PyTensorObject__register_storage_delete_hook(PyObject* self, Py END_HANDLE_ERRORS } -static PyMethodDef PyTensorObject_methods[] = { +PyMethodDef* PyTensorObject_methods = NULL; + +PyMethodDef* concat_method_def(PyMethodDef original_methods[], PyMethodDef extra_methods[]) { + int len1 = 0; + int len2 = 0; + PyMethodDef* p1 = original_methods; + PyMethodDef* p2 = extra_methods; + while ((p1++)->ml_name != NULL) { len1++; } + while ((p2++)->ml_name != NULL) { len2++; } + PyMethodDef* total_methods = new PyMethodDef[len1 + len2 + 1]; + for (int i = 0; i < len1; i++) total_methods[i] = original_methods[i]; + for (int i = 0; i < len2; i++) total_methods[i + len1] = extra_methods[i]; + total_methods[len1 + len2] = {NULL}; + return total_methods; +} + +static PyMethodDef PyTensorObject_original_methods[] = { {"storage_offset", PyTensorObject_storage_offset, METH_NOARGS, NULL}, {"stride", PyTensorObject_stride, METH_NOARGS, NULL}, {"is_contiguous", PyTensorObject_is_contiguous, METH_NOARGS, NULL}, @@ -531,6 +549,10 @@ static PyHeapTypeObject* MakeTensorMetaclass() { return heap_type; } +extern PyNumberMethods PyTensorObject_as_number; +extern PyObject* PyTensorObject_richcompare(PyObject*, PyObject*, int); +extern PyMethodDef PyTensorObject_extra_methods[]; + static PyHeapTypeObject* TensorMetaclass_Type = MakeTensorMetaclass(); static PyTypeObject* MakeTensorType() { @@ -548,11 +570,15 @@ static PyTypeObject* MakeTensorType() { type->tp_init = PyTensorObject_init; type->tp_dealloc = PyTensorObject_dealloc; type->tp_getset = PyTensorObject_properties; - type->tp_methods = PyTensorObject_methods; + type->tp_methods = PyTensorObject_original_methods; + // type->tp_methods = + // concat_method_def(PyTensorObject_original_methods, PyTensorObject_extra_methods); - type->tp_as_number = &heap_type->as_number; + // type->tp_as_number = &heap_type->as_number; + type->tp_as_number = &PyTensorObject_as_number; 
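+  // route the number protocol (+, -, *, /, ...) to the static slot table
+  // defined in tensor_functions.cpp so these operators dispatch in C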
type->tp_as_sequence = &PyTensorObject_as_sequence; type->tp_as_mapping = &PyTensorObject_as_mapping; + // type->tp_richcompare = PyTensorObject_richcompare; type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index b3cc32858b5..01d80dc0a05 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -42,22 +42,22 @@ namespace one { #define ASSERT(x) (x).GetOrThrow() #define ASSERT_PTR(x) (x).GetPtrOrThrow() -#define NB_UNARY_FUNC(func_name, bind_func, name) \ +#define NB_UNARY_FUNC(func_name, bind_func, name) \ static PyObject* func_name(PyObject* self) { \ HANDLE_ERRORS \ PyObject* tuple = PyTuple_Pack(1, self); \ - std::cout << "cpython " << name << std::endl; \ + std::cout << "cpython " << name << std::endl; \ auto* result = bind_func(NULL, tuple, NULL); \ if (PyErr_Occurred()) { throw py::error_already_set(); } \ return result; \ END_HANDLE_ERRORS \ } -#define NB_BINARY_FUNC(func_name, bind_func, name) \ +#define NB_BINARY_FUNC(func_name, bind_func, name) \ static PyObject* func_name(PyObject* a, PyObject* b) { \ HANDLE_ERRORS \ PyObject* tuple = PyTuple_Pack(2, a, b); \ - std::cout << "cpython " << name << std::endl; \ + std::cout << "cpython " << name << std::endl; \ auto* result = bind_func(NULL, tuple, NULL); \ if (PyErr_Occurred()) { throw py::error_already_set(); } \ return result; \ @@ -180,7 +180,6 @@ PyNumberMethods PyTensorObject_as_number = { }; - // extra methods static PyObject* PyTensorObject_byte(PyObject* self, PyObject* unused) { @@ -328,8 +327,34 @@ PyMethodDef PyTensorObject_extra_methods[] = { {"sin_", PyTensorObject_sin_, METH_NOARGS, NULL}, {"isnan", PyTensorObject_isnan, METH_NOARGS, NULL}, {"isinf", PyTensorObject_isinf, METH_NOARGS, NULL}, + + {"floor_divide", PyTensorObject_div, METH_O, NULL}, + {"floor", PyTensorObject_floor, METH_NOARGS, NULL}, + {"floor_", PyTensorObject_floor_, METH_NOARGS, NULL}, + {NULL}, }; +// tp_richcompare + +PyObject* PyTensorObject_richcompare(PyObject* self, PyObject* other, int op) { + std::cout << "cpython compare" << std::endl; + PyObject* tuple = PyTuple_Pack(2, self, other); + + switch (op) { + case Py_LT: return functional::less(NULL, tuple, NULL); + case Py_LE: return functional::less_equal(NULL, tuple, NULL); + case Py_EQ: { + if (self == Py_None || other == Py_None) return Py_False; + return functional::equal(NULL, tuple, NULL); + } + case Py_NE: return functional::not_equal(NULL, tuple, NULL); + case Py_GT: return functional::greater(NULL, tuple, NULL); + case Py_GE: return functional::greater_equal(NULL, tuple, NULL); + } + return NULL; +} + +// normal methods } // namespace one } // namespace oneflow diff --git a/oneflow/api/python/framework/tensor_functions.h b/oneflow/api/python/framework/tensor_functions.h index 83acb92f10a..6b1359ab94d 100644 --- a/oneflow/api/python/framework/tensor_functions.h +++ b/oneflow/api/python/framework/tensor_functions.h @@ -18,12 +18,12 @@ limitations under the License. 
 #include 
 
-
 namespace oneflow {
 namespace one {
 
 extern PyNumberMethods PyTensorObject_as_number;
 extern PyMethodDef PyTensorObject_extra_methods[];
+extern PyObject* PyTensorObject_richcompare(PyObject*, PyObject*, int);
 
 } // namespace one
 } // namespace oneflow
 
diff --git a/python/oneflow/framework/tensor.py b/python/oneflow/framework/tensor.py
old mode 100644
new mode 100755
index d645227cd03..353a13352ce
--- a/python/oneflow/framework/tensor.py
+++ b/python/oneflow/framework/tensor.py
@@ -276,7 +276,7 @@ def _rtruediv(self, other):
 
 
 def _floor_divide(self, other):
-    return flow.floor_divide(self, other)
+    return flow._C.floor_divide(self, other)
 
 
 def _floor(self):
@@ -1159,103 +1159,43 @@ def _cumprod(self, dim, dtype=None):
 
 
 def RegisterMethods():
-    Tensor.__mul__ = lambda self, other: self.mul(other)
-    Tensor.__rmul__ = lambda self, other: self.mul(other)
-    Tensor.__add__ = lambda self, other: self.add(other)
     Tensor.__iadd__ = lambda self, other: self.add_(other)
-    Tensor.__matmul__ = lambda self, other: self.matmul(other)
-    Tensor.byte = _byte
-    Tensor.ndim = property(_ndim)
-    Tensor.numpy = _numpy
-    Tensor.size = _size
-    Tensor.dim = _ndim
-    Tensor.ndimension = _ndim
-    Tensor.nelement = _nelement
-    Tensor.numel = _numel
-    Tensor.element_size = _element_size
-    Tensor.backward = _backward
-    Tensor.__setitem__ = _setitem
+    Tensor.numpy = _numpy  # don't know how yet
+    Tensor.size = _size  # don't know how yet
+    Tensor.backward = _backward  # don't know how yet
+    Tensor.__setitem__ = _setitem  # don't know how yet
     Tensor.__str__ = _str
     Tensor.__repr__ = _repr
-    Tensor.__eq__ = _eq
-    Tensor.__ne__ = _ne
     Tensor.__bool__ = is_nonzero
-    Tensor.__gt__ = _gt
-    Tensor.__lt__ = _lt
-    Tensor.__ge__ = _ge
-    Tensor.__le__ = _le
-    Tensor.__and__ = _and
-    Tensor.__or__ = _or
-    Tensor.__xor__ = _xor
-    Tensor.__mul__ = _mul
-    Tensor.__rmul__ = _rmul
-    Tensor.__add__ = _add
     Tensor.__iadd__ = _iadd
-    Tensor.__radd__ = _radd
-    Tensor.addmm = _addmm
-    Tensor.__sub__ = _sub
-    Tensor.__rsub__ = _rsub
-    Tensor.__truediv__ = _truediv
-    Tensor.__rtruediv__ = _rtruediv
-    Tensor.__neg__ = _neg
-    Tensor.__pow__ = _pow
-    Tensor.__rpow__ = _rpow
-    Tensor.__format__ = _format
-    Tensor.__floordiv__ = _floor_divide
-    Tensor.__mod__ = _fmod
-    Tensor.__index__ = _index
-    Tensor.__invert__ = _invert
-    Tensor.__float__ = _scalar_float
-    Tensor.__int__ = _scalar_int
-    Tensor.__array__ = _numpy
-    Tensor.uniform_ = _uniform
-    Tensor.trunc_normal_ = _trunc_normal_
-    Tensor.kaiming_uniform_ = _kaiming_uniform
-    Tensor.kaiming_normal_ = _kaiming_normal
-    Tensor.xavier_normal_ = _xavier_normal
-    Tensor.xavier_uniform_ = _xavier_uniform
-    Tensor.orthogonal_ = _orthogonal
-    Tensor.normal_ = _normal
-    Tensor.fill_ = _fill
-    Tensor.copy_ = _copy
-    Tensor.get_device = _get_device
-    Tensor._meta_repr = _meta_repr
-    Tensor.abs = _abs
-    Tensor.exp = _exp
-    Tensor.floor_divide = _floor_divide
-    Tensor.floor = _floor
-    Tensor.floor_ = _floor_inplace_
+    Tensor.addmm = _addmm  # code too complex
+    Tensor.__format__ = _format  # don't know how yet, numpy
+    Tensor.__index__ = _index  # don't know how yet, numpy
+    Tensor.__float__ = _scalar_float  # numpy
+    Tensor.__int__ = _scalar_int  # numpy
+    Tensor.__array__ = _numpy  # numpy
+    Tensor.uniform_ = _uniform  # init
+    Tensor.trunc_normal_ = _trunc_normal_  # init
+    Tensor.kaiming_uniform_ = _kaiming_uniform  # init
+    Tensor.kaiming_normal_ = _kaiming_normal  # init
+    Tensor.xavier_normal_ = _xavier_normal  # init
+    Tensor.xavier_uniform_ = _xavier_uniform  # init
+    Tensor.orthogonal_ = _orthogonal  # complex
+    Tensor.normal_ = _normal  # init
+    Tensor.fill_ = _fill  # init
+    Tensor.copy_ = _copy  # complex
+    Tensor._meta_repr
= _meta_repr  # no interface
     Tensor.argmax = _argmax
     Tensor.argmin = _argmin
     Tensor.argsort = _argsort
     Tensor.argwhere = _argwhere
-    Tensor.acos = _acos
-    Tensor.arccos = _arccos
-    Tensor.acosh = _acosh
     Tensor.amin = _amin
-    Tensor.arccosh = _arccosh
-    Tensor.atanh = _atanh
     Tensor.atan2 = _atan2
-    Tensor.arctanh = _arctanh
-    Tensor.sign = _sign
-    Tensor.sinh = _sinh
-    Tensor.tan = _tan
     Tensor.gt = _gt
     Tensor.ge = _ge
-    Tensor.gelu = _gelu
-    Tensor.mish = _mish
-    Tensor.negative = _negative
-    Tensor.neg = _neg
-    Tensor.sigmoid = _sigmoid
-    Tensor.tanh = _tanh
-    Tensor.silu = _silu
-    Tensor.selu = _selu
-    Tensor.softsign = _softsign
     Tensor.cast = _cast
     Tensor.diag = _diag
     Tensor.diagonal = _diagonal
-    Tensor.log1p = _log1p
-    Tensor.log2 = _log2
     Tensor.add = _add
     Tensor.add_ = _add_inplace
     Tensor.addcmul = _addcmul
@@ -1264,50 +1204,30 @@ def RegisterMethods():
     Tensor.div_ = _truediv_inplace
     Tensor.mul = _mul
     Tensor.mul_ = _mul_
-    Tensor.reciprocal = _reciprocal
     Tensor.sub = _sub
     Tensor.sub_ = _sub_inplace
-    Tensor.asin = _asin
-    Tensor.arcsin = _arcsin
-    Tensor.asinh = _asinh
-    Tensor.arcsinh = _arcsinh
-    Tensor.atan = _atan
-    Tensor.arctan = _arctan
-    Tensor.ceil = _ceil
     Tensor.clamp = _clamp
     Tensor.clamp_ = _clamp_
     Tensor.clip = _clip
     Tensor.clip_ = _clip_
-    Tensor.cos = _cos
-    Tensor.cosh = _cosh
     Tensor.cpu = _cpu
     Tensor.cuda = _cuda
     Tensor.expand = _expand
     Tensor.expand_as = _expand_as
-    Tensor.erf = _erf
-    Tensor.erfc = _erfc
-    Tensor.erfinv = _erfinv
-    Tensor.erfinv_ = _erfinv_inplace
-    Tensor.expm1 = _expm1
     Tensor.fmod = _fmod
     Tensor.flatten = _flatten
     Tensor.flip = _flip
     Tensor.in_top_k = _in_top_k
     Tensor.index_select = _index_select
-    Tensor.log = _log
     Tensor.minimum = _minimum
     Tensor.maximum = _maximum
     Tensor.new_empty = _new_empty
     Tensor.new_ones = _new_ones
     Tensor.new_zeros = _new_zeros
     Tensor.pow = _pow
-    Tensor.rsqrt = _rsqrt
-    Tensor.sqrt = _sqrt
-    Tensor.square = _square
     Tensor.var = _var
     Tensor.std = _std
     Tensor.matmul = _matmul
-    Tensor.round = _round
     Tensor.softplus = _softplus
     Tensor.tril = _tril
     Tensor.triu = _triu
@@ -1346,7 +1266,6 @@ def RegisterMethods():
     Tensor.all = _all
     Tensor.any = _any
     Tensor.T = property(_T)
-    Tensor.t = _t
     Tensor.masked_fill = _masked_fill
     Tensor.masked_select = _masked_select
     Tensor.eq = _eq
@@ -1376,12 +1295,8 @@ def RegisterMethods():
     Tensor.sum = _sum
     Tensor.mean = _mean
     Tensor.prod = _prod
-    Tensor.sin = _sin
-    Tensor.sin_ = _sin_inplace
     Tensor.is_consistent = _is_consistent
     Tensor.to_consistent = _to_consistent
-    Tensor.isnan = _isnan
-    Tensor.isinf = _isinf
     Tensor.new_tensor = _new_tensor
     Tensor.cumsum = _cumsum
     Tensor.cumprod = _cumprod

From 603684eefe80c3652bc20812516cff2b715c93ce Mon Sep 17 00:00:00 2001
From: WangYi
Date: Tue, 24 May 2022 15:32:01 +0800
Subject: [PATCH 03/50] add hash, restore tensor.py

---
 oneflow/api/python/framework/tensor.cpp |  10 +-
 python/oneflow/framework/tensor.py      | 128 ++++++++++++++++++++----
 2 files changed, 112 insertions(+), 26 deletions(-)

diff --git a/oneflow/api/python/framework/tensor.cpp b/oneflow/api/python/framework/tensor.cpp
index 599ffabba38..bbd149f5ca0 100644
--- a/oneflow/api/python/framework/tensor.cpp
+++ b/oneflow/api/python/framework/tensor.cpp
@@ -18,6 +18,7 @@ limitations under the License.
#include #include #include +#include #include "oneflow/api/python/exception/exception.h" #include "oneflow/api/python/framework/size.h" #include "oneflow/api/python/functional/common.h" @@ -570,15 +571,16 @@ static PyTypeObject* MakeTensorType() { type->tp_init = PyTensorObject_init; type->tp_dealloc = PyTensorObject_dealloc; type->tp_getset = PyTensorObject_properties; - type->tp_methods = PyTensorObject_original_methods; - // type->tp_methods = - // concat_method_def(PyTensorObject_original_methods, PyTensorObject_extra_methods); + // type->tp_methods = PyTensorObject_original_methods; + type->tp_methods = + concat_method_def(PyTensorObject_original_methods, PyTensorObject_extra_methods); // type->tp_as_number = &heap_type->as_number; type->tp_as_number = &PyTensorObject_as_number; type->tp_as_sequence = &PyTensorObject_as_sequence; type->tp_as_mapping = &PyTensorObject_as_mapping; - // type->tp_richcompare = PyTensorObject_richcompare; + type->tp_richcompare = PyTensorObject_richcompare; + type->tp_hash = (hashfunc)_Py_HashPointer; type->tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_HEAPTYPE; diff --git a/python/oneflow/framework/tensor.py b/python/oneflow/framework/tensor.py index 353a13352ce..68065fe10f3 100755 --- a/python/oneflow/framework/tensor.py +++ b/python/oneflow/framework/tensor.py @@ -1159,43 +1159,103 @@ def _cumprod(self, dim, dtype=None): def RegisterMethods(): + Tensor.__mul__ = lambda self, other: self.mul(other) + Tensor.__rmul__ = lambda self, other: self.mul(other) + Tensor.__add__ = lambda self, other: self.add(other) Tensor.__iadd__ = lambda self, other: self.add_(other) - Tensor.numpy = _numpy # 不会 - Tensor.size = _size # 不会 - Tensor.backward = _backward # 不会 - Tensor.__setitem__ = _setitem # 不会 + Tensor.__matmul__ = lambda self, other: self.matmul(other) + Tensor.byte = _byte + Tensor.ndim = property(_ndim) + Tensor.numpy = _numpy + Tensor.size = _size + Tensor.dim = _ndim + Tensor.ndimension = _ndim + Tensor.nelement = _nelement + Tensor.numel = _numel + Tensor.element_size = _element_size + Tensor.backward = _backward + Tensor.__setitem__ = _setitem Tensor.__str__ = _str Tensor.__repr__ = _repr + Tensor.__eq__ = _eq + Tensor.__ne__ = _ne Tensor.__bool__ = is_nonzero + Tensor.__gt__ = _gt + Tensor.__lt__ = _lt + Tensor.__ge__ = _ge + Tensor.__le__ = _le + Tensor.__and__ = _and + Tensor.__or__ = _or + Tensor.__xor__ = _xor + Tensor.__mul__ = _mul + Tensor.__rmul__ = _rmul + Tensor.__add__ = _add Tensor.__iadd__ = _iadd - Tensor.addmm = _addmm # 代码太复杂 - Tensor.__format__ = _format # 不会 numpy - Tensor.__index__ = _index # buhui numpy - Tensor.__float__ = _scalar_float # numpy - Tensor.__int__ = _scalar_int # numpy - Tensor.__array__ = _numpy # numpy - Tensor.uniform_ = _uniform # init - Tensor.trunc_normal_ = _trunc_normal_ # init - Tensor.kaiming_uniform_ = _kaiming_uniform # init - Tensor.kaiming_normal_ = _kaiming_normal # init - Tensor.xavier_normal_ = _xavier_normal # init - Tensor.xavier_uniform_ = _xavier_uniform # init - Tensor.orthogonal_ = _orthogonal # fuza - Tensor.normal_ = _normal # init - Tensor.fill_ = _fill # init - Tensor.copy_ = _copy # fuza - Tensor._meta_repr = _meta_repr # 没有接口 + Tensor.__radd__ = _radd + Tensor.addmm = _addmm + Tensor.__sub__ = _sub + Tensor.__rsub__ = _rsub + Tensor.__truediv__ = _truediv + Tensor.__rtruediv__ = _rtruediv + Tensor.__neg__ = _neg + Tensor.__pow__ = _pow + Tensor.__rpow__ = _rpow + Tensor.__format__ = _format + Tensor.__floordiv__ = _floor_divide + Tensor.__mod__ = _fmod + Tensor.__index__ 
= _index + Tensor.__invert__ = _invert + Tensor.__float__ = _scalar_float + Tensor.__int__ = _scalar_int + Tensor.__array__ = _numpy + Tensor.uniform_ = _uniform + Tensor.trunc_normal_ = _trunc_normal_ + Tensor.kaiming_uniform_ = _kaiming_uniform + Tensor.kaiming_normal_ = _kaiming_normal + Tensor.xavier_normal_ = _xavier_normal + Tensor.xavier_uniform_ = _xavier_uniform + Tensor.orthogonal_ = _orthogonal + Tensor.normal_ = _normal + Tensor.fill_ = _fill + Tensor.copy_ = _copy + Tensor.get_device = _get_device + Tensor._meta_repr = _meta_repr + Tensor.abs = _abs + Tensor.exp = _exp + Tensor.floor_divide = _floor_divide + Tensor.floor = _floor + Tensor.floor_ = _floor_inplace_ Tensor.argmax = _argmax Tensor.argmin = _argmin Tensor.argsort = _argsort Tensor.argwhere = _argwhere + Tensor.acos = _acos + Tensor.arccos = _arccos + Tensor.acosh = _acosh Tensor.amin = _amin + Tensor.arccosh = _arccosh + Tensor.atanh = _atanh Tensor.atan2 = _atan2 + Tensor.arctanh = _arctanh + Tensor.sign = _sign + Tensor.sinh = _sinh + Tensor.tan = _tan Tensor.gt = _gt Tensor.ge = _ge + Tensor.gelu = _gelu + Tensor.mish = _mish + Tensor.negative = _negative + Tensor.neg = _neg + Tensor.sigmoid = _sigmoid + Tensor.tanh = _tanh + Tensor.silu = _silu + Tensor.selu = _selu + Tensor.softsign = _softsign Tensor.cast = _cast Tensor.diag = _diag Tensor.diagonal = _diagonal + Tensor.log1p = _log1p + Tensor.log2 = _log2 Tensor.add = _add Tensor.add_ = _add_inplace Tensor.addcmul = _addcmul @@ -1204,30 +1264,50 @@ def RegisterMethods(): Tensor.div_ = _truediv_inplace Tensor.mul = _mul Tensor.mul_ = _mul_ + Tensor.reciprocal = _reciprocal Tensor.sub = _sub Tensor.sub_ = _sub_inplace + Tensor.asin = _asin + Tensor.arcsin = _arcsin + Tensor.asinh = _asinh + Tensor.arcsinh = _arcsinh + Tensor.atan = _atan + Tensor.arctan = _arctan + Tensor.ceil = _ceil Tensor.clamp = _clamp Tensor.clamp_ = _clamp_ Tensor.clip = _clip Tensor.clip_ = _clip_ + Tensor.cos = _cos + Tensor.cosh = _cosh Tensor.cpu = _cpu Tensor.cuda = _cuda Tensor.expand = _expand Tensor.expand_as = _expand_as + Tensor.erf = _erf + Tensor.erfc = _erfc + Tensor.erfinv = _erfinv + Tensor.erfinv_ = _erfinv_inplace + Tensor.expm1 = _expm1 Tensor.fmod = _fmod Tensor.flatten = _flatten Tensor.flip = _flip Tensor.in_top_k = _in_top_k Tensor.index_select = _index_select + Tensor.log = _log Tensor.minimum = _minimum Tensor.maximum = _maximum Tensor.new_empty = _new_empty Tensor.new_ones = _new_ones Tensor.new_zeros = _new_zeros Tensor.pow = _pow + Tensor.rsqrt = _rsqrt + Tensor.sqrt = _sqrt + Tensor.square = _square Tensor.var = _var Tensor.std = _std Tensor.matmul = _matmul + Tensor.round = _round Tensor.softplus = _softplus Tensor.tril = _tril Tensor.triu = _triu @@ -1266,6 +1346,7 @@ def RegisterMethods(): Tensor.all = _all Tensor.any = _any Tensor.T = property(_T) + Tensor.t = _t Tensor.masked_fill = _masked_fill Tensor.masked_select = _masked_select Tensor.eq = _eq @@ -1295,13 +1376,16 @@ def RegisterMethods(): Tensor.sum = _sum Tensor.mean = _mean Tensor.prod = _prod + Tensor.sin = _sin + Tensor.sin_ = _sin_inplace Tensor.is_consistent = _is_consistent Tensor.to_consistent = _to_consistent + Tensor.isnan = _isnan + Tensor.isinf = _isinf Tensor.new_tensor = _new_tensor Tensor.cumsum = _cumsum Tensor.cumprod = _cumprod - def register_tensor_op(op_name): def set_tensor_op(method): setattr(Tensor, op_name, method) From 1ecd73e1e69d1fc8f9f889a548c854c70fbb2493 Mon Sep 17 00:00:00 2001 From: WangYi Date: Wed, 25 May 2022 11:02:06 +0800 Subject: [PATCH 04/50] check replacement 
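
This patch comments out the Python-level registrations whose C counterparts now
exist, so the CPython implementations actually take over. A quick interactive
sanity check (a minimal sketch, assuming a local build of this branch; the
exact descriptor type printed is a CPython implementation detail):

    import oneflow as flow

    x = flow.ones(2, 3)
    # methods registered via PyMethodDef resolve to C-level descriptors
    # rather than the plain Python functions from tensor.py
    print(type(flow.Tensor.abs))   # e.g. <class 'method_descriptor'>
    print(x.abs().shape)           # behavior unchanged; shape stays (2, 3)
    print((x + x).numpy().sum())   # nb_add slot: 12.0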
--- .../api/python/framework/tensor_functions.cpp | 204 ++++++++---------- python/oneflow/framework/tensor.py | 167 +++++++------- python/oneflow/nn/modules/math_ops.py | 4 +- 3 files changed, 173 insertions(+), 202 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index 01d80dc0a05..9c0e921843a 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -14,27 +14,11 @@ See the License for the specific language governing permissions and limitations under the License. */ -#include #include #include "oneflow/api/python/exception/exception.h" -#include "oneflow/api/python/framework/size.h" -#include "oneflow/api/python/framework/tensor.h" #include "oneflow/api/python/functional/common.h" -#include "oneflow/api/python/functional/python_arg.h" #include "oneflow/api/python/functional/functional_api.yaml.pybind.h" -#include "oneflow/api/python/functional/tensor_api.yaml.pybind.h" -#include "oneflow/api/python/of_api_registry.h" -#include "oneflow/api/python/ofblob/ofblob.e.h" -#include "oneflow/api/python/utils/tensor_utils.h" -#include "oneflow/core/autograd/autograd_engine.h" -#include "oneflow/core/framework/tensor.h" -#include "oneflow/core/framework/tensor_rpc_util.h" -#include "oneflow/core/framework/device.h" -#include "oneflow/core/common/stride.h" -#include "oneflow/core/framework/dtype.h" -#include "oneflow/core/framework/placement_utils.h" #include "oneflow/core/functional/functional.h" -#include "oneflow/core/functional/tensor_index.h" namespace oneflow { namespace one { @@ -42,45 +26,50 @@ namespace one { #define ASSERT(x) (x).GetOrThrow() #define ASSERT_PTR(x) (x).GetPtrOrThrow() -#define NB_UNARY_FUNC(func_name, bind_func, name) \ +#define NB_UNARY_FUNC(func_name, bind_func) \ static PyObject* func_name(PyObject* self) { \ HANDLE_ERRORS \ PyObject* tuple = PyTuple_Pack(1, self); \ - std::cout << "cpython " << name << std::endl; \ auto* result = bind_func(NULL, tuple, NULL); \ if (PyErr_Occurred()) { throw py::error_already_set(); } \ return result; \ END_HANDLE_ERRORS \ } -#define NB_BINARY_FUNC(func_name, bind_func, name) \ +#define NB_BINARY_FUNC(func_name, bind_func) \ static PyObject* func_name(PyObject* a, PyObject* b) { \ HANDLE_ERRORS \ PyObject* tuple = PyTuple_Pack(2, a, b); \ - std::cout << "cpython " << name << std::endl; \ auto* result = bind_func(NULL, tuple, NULL); \ if (PyErr_Occurred()) { throw py::error_already_set(); } \ return result; \ END_HANDLE_ERRORS \ } -NB_BINARY_FUNC(PyTensorObject_add, functional::add, "add"); -NB_BINARY_FUNC(PyTensorObject_sub, functional::sub, "sub"); -NB_BINARY_FUNC(PyTensorObject_mul, functional::mul, "mul"); -NB_BINARY_FUNC(PyTensorObject_fmod, functional::fmod, "fmod"); -NB_BINARY_FUNC(PyTensorObject_div, functional::div, "div"); +NB_UNARY_FUNC(PyTensorObject_absolute, functional::abs); +NB_UNARY_FUNC(PyTensorObject_negative, functional::negative); + +NB_BINARY_FUNC(PyTensorObject_add, functional::add); +NB_BINARY_FUNC(PyTensorObject_sub, functional::sub); +NB_BINARY_FUNC(PyTensorObject_mul, functional::mul); +NB_BINARY_FUNC(PyTensorObject_fmod, functional::fmod); +NB_BINARY_FUNC(PyTensorObject_div, functional::div); +NB_BINARY_FUNC(PyTensorObject_and, functional::logical_and); +NB_BINARY_FUNC(PyTensorObject_xor, functional::logical_xor); +NB_BINARY_FUNC(PyTensorObject_or, functional::logical_or); +NB_BINARY_FUNC(PyTensorObject_floor_div, functional::floor_divide); 
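+// nb_true_divide can reuse functional::div: Python 3's "/" is always true division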
+NB_BINARY_FUNC(PyTensorObject_true_div, functional::div);
+NB_BINARY_FUNC(PyTensorObject_matrix_multiply, functional::matmul);
+// TODO: not implemented yet
+// NB_UNARY_FUNC(PyTensorObject_positive, functional::positive);
 
 PyObject* PyTensorObject_pow(PyObject* a, PyObject* b, PyObject* unused) {
   HANDLE_ERRORS
   PyObject* tuple = PyTuple_Pack(2, a, b);
   auto* result = functional::pow(NULL, tuple, NULL);
   if (PyErr_Occurred()) { throw py::error_already_set(); }
-  // std::cout << "using cpython pow" << std::endl;
   return result;
   END_HANDLE_ERRORS
 }
-NB_UNARY_FUNC(PyTensorObject_negative, functional::negative, "negative");
-// NB_UNARY_FUNC(PyTensorObject_positive, functional::positive);
-NB_UNARY_FUNC(PyTensorObject_absolute, functional::abs, "abs");
 
 static PyObject* PyTensorObject_invert(PyObject* self) {
   HANDLE_ERRORS
@@ -89,16 +78,11 @@ static PyObject* PyTensorObject_invert(PyObject* self) {
   PyObject* tuple = PyTuple_Pack(1, self);
   auto* result = functional::logical_not(NULL, tuple, NULL);
   if (PyErr_Occurred()) { throw py::error_already_set(); }
-  std::cout << "using cpython invert" << std::endl;
   return result;
   END_HANDLE_ERRORS
 }
 
-NB_BINARY_FUNC(PyTensorObject_and, functional::logical_and, "logical_and");
-NB_BINARY_FUNC(PyTensorObject_xor, functional::logical_xor, "logical_xor");
-NB_BINARY_FUNC(PyTensorObject_or, functional::logical_or, "logical or");
-
-#define INPLACE_BINARY_FUNC(func_name, bind_func, name) \
+#define NB_INPLACE_BINARY_FUNC(func_name, bind_func) \
   static PyObject* func_name(PyObject* a, PyObject* b) { \
     HANDLE_ERRORS \
     PyObject* tuple = PyTuple_Pack(2, a, b); \
@@ -110,10 +94,18 @@ NB_BINARY_FUNC(PyTensorObject_or, functional::logical_or, "logical or");
     END_HANDLE_ERRORS \
   }
 
-INPLACE_BINARY_FUNC(PyTensorObject_inplace_add, functional::add, "add");
-INPLACE_BINARY_FUNC(PyTensorObject_inplace_sub, functional::sub, "sub");
-INPLACE_BINARY_FUNC(PyTensorObject_inplace_mul, functional::mul, "mul");
-INPLACE_BINARY_FUNC(PyTensorObject_inplace_fmod, functional::fmod, "fmod");
+// TODO: still have bug here
+// NB_INPLACE_BINARY_FUNC(PyTensorObject_inplace_add, functional::add, "add");
+NB_INPLACE_BINARY_FUNC(PyTensorObject_inplace_sub, functional::sub);
+NB_INPLACE_BINARY_FUNC(PyTensorObject_inplace_mul, functional::mul);
+NB_INPLACE_BINARY_FUNC(PyTensorObject_inplace_fmod, functional::fmod);
+NB_INPLACE_BINARY_FUNC(PyTensorObject_inplace_and, functional::logical_and);
+NB_INPLACE_BINARY_FUNC(PyTensorObject_inplace_xor, functional::logical_xor);
+NB_INPLACE_BINARY_FUNC(PyTensorObject_inplace_or, functional::logical_or);
+NB_INPLACE_BINARY_FUNC(PyTensorObject_inplace_floor_div, functional::floor_divide);
+NB_INPLACE_BINARY_FUNC(PyTensorObject_inplace_true_div, functional::div);
+// TODO: inplace matmul not supported yet
+// INPLACE_BINARY_FUNC(PyTensorObject_inplace_matrix_multiply, functional::matmul, "matmul");
 
 PyObject* PyTensorObject_inplace_pow(PyObject* a, PyObject* b, PyObject* unused) {
   HANDLE_ERRORS
   PyObject* tuple = PyTuple_Pack(2, a, b);
   PyObject* dict = PyDict_New();
   CHECK_OR_THROW(PyDict_SetItemString(dict, "inplace", Py_True) > -1);
   auto* result = functional::pow(NULL, tuple, dict);
   if (PyErr_Occurred()) { throw py::error_already_set(); }
-  // std::cout << "using cpython bind_func" << std::endl;
   return result;
   END_HANDLE_ERRORS
 }
 
-INPLACE_BINARY_FUNC(PyTensorObject_inplace_and, functional::logical_and, "logical_and");
-INPLACE_BINARY_FUNC(PyTensorObject_inplace_xor, functional::logical_xor, "logical_xor");
-INPLACE_BINARY_FUNC(PyTensorObject_inplace_or, functional::logical_or, "logical_or"); -NB_BINARY_FUNC(PyTensorObject_floor_div, functional::floor_divide, "floor divide"); -NB_BINARY_FUNC(PyTensorObject_true_div, functional::div, "true_div"); -INPLACE_BINARY_FUNC(PyTensorObject_inplace_floor_div, functional::floor_divide, "floor_divide"); -INPLACE_BINARY_FUNC(PyTensorObject_inplace_true_div, functional::div, "true_divide"); -NB_BINARY_FUNC(PyTensorObject_matrix_multiply, functional::matmul, "matmul"); -// INPLACE_BINARY_FUNC(PyTensorObject_inplace_matrix_multiply, functional::matmul, "matmul"); - PyNumberMethods PyTensorObject_as_number = { - PyTensorObject_add, // nb_add, __add__ - PyTensorObject_sub, // nb_subtract, __sub__ - PyTensorObject_mul, // nb_multiply, __mul__ - PyTensorObject_fmod, // nb_remainder, __mod__, __rmod__ + PyTensorObject_add, // nb_add + PyTensorObject_sub, // nb_subtract + PyTensorObject_mul, // nb_multiply + PyTensorObject_fmod, // nb_remainder NULL, // nb_divmod PyTensorObject_pow, // nb_power PyTensorObject_negative, // nb_negative NULL, // nb_positive PyTensorObject_absolute, // nb_absolute - NULL, // nb_bool torch doesn't implement + NULL, // nb_bool PyTensorObject_invert, // nb_invert NULL, // nb_lshift NULL, // nb_rshift @@ -158,7 +139,7 @@ PyNumberMethods PyTensorObject_as_number = { NULL, // nb_reserved NULL, // nb_float - NULL, // bug PyTensorObject_inplace_add, //nb_inplace_add + NULL, // nb_inplace_add PyTensorObject_inplace_sub, // nb_inplace_sub PyTensorObject_inplace_mul, // nb_inplace_mul PyTensorObject_inplace_fmod, // nb_inplace_remainder @@ -181,38 +162,82 @@ PyNumberMethods PyTensorObject_as_number = { }; // extra methods +#define UNARY_METHOD(func_name, bind_func) \ + static PyObject* func_name(PyObject* self, PyObject* unused) { \ + HANDLE_ERRORS \ + return PyTensor_New(ASSERT_PTR(bind_func(PyTensor_Unpack(self)))); \ + END_HANDLE_ERRORS \ + } + +UNARY_METHOD(PyTensorObject_abs, functional::Abs); +UNARY_METHOD(PyTensorObject_exp, functional::Exp); +UNARY_METHOD(PyTensorObject_floor, functional::Floor); +UNARY_METHOD(PyTensorObject_floor_, functional::Floor_); +UNARY_METHOD(PyTensorObject_sign, functional::Sign); +UNARY_METHOD(PyTensorObject_gelu, functional::Gelu); +UNARY_METHOD(PyTensorObject_mish, functional::Mish); +UNARY_METHOD(PyTensorObject_negative, functional::Negative); +UNARY_METHOD(PyTensorObject_sigmoid, functional::Sigmoid); +UNARY_METHOD(PyTensorObject_silu, functional::Silu); +UNARY_METHOD(PyTensorObject_selu, functional::Selu); +UNARY_METHOD(PyTensorObject_softsign, functional::SoftSign); +UNARY_METHOD(PyTensorObject_log1p, functional::Log1p); +UNARY_METHOD(PyTensorObject_log2, functional::Log2); +UNARY_METHOD(PyTensorObject_reciprocal, functional::Reciprocal); +UNARY_METHOD(PyTensorObject_ceil, functional::Ceil); +UNARY_METHOD(PyTensorObject_erf, functional::Erf); +UNARY_METHOD(PyTensorObject_erfc, functional::Erfc); +UNARY_METHOD(PyTensorObject_erfinv, functional::Erfinv); +UNARY_METHOD(PyTensorObject_erfinv_, functional::ErfinvInplace); +UNARY_METHOD(PyTensorObject_expm1, functional::Expm1); +UNARY_METHOD(PyTensorObject_log, functional::Log); +UNARY_METHOD(PyTensorObject_rsqrt, functional::Rsqrt); +UNARY_METHOD(PyTensorObject_sqrt, functional::Sqrt); +UNARY_METHOD(PyTensorObject_square, functional::Square); +UNARY_METHOD(PyTensorObject_round, functional::Round); +UNARY_METHOD(PyTensorObject_t, functional::TransposeAllDimFunction); +UNARY_METHOD(PyTensorObject_isnan, functional::IsNan); +UNARY_METHOD(PyTensorObject_isinf, 
functional::IsInf); +UNARY_METHOD(PyTensorObject_sin, functional::Sin); +UNARY_METHOD(PyTensorObject_sin_, functional::Sin_); +UNARY_METHOD(PyTensorObject_asin, functional::Asin); +UNARY_METHOD(PyTensorObject_cos, functional::Cos); +UNARY_METHOD(PyTensorObject_acos, functional::Acos); +UNARY_METHOD(PyTensorObject_tan, functional::Tan); +UNARY_METHOD(PyTensorObject_atan, functional::Atan); +UNARY_METHOD(PyTensorObject_sinh, functional::Sinh); +UNARY_METHOD(PyTensorObject_asinh, functional::Asinh); +UNARY_METHOD(PyTensorObject_cosh, functional::Cosh); +UNARY_METHOD(PyTensorObject_acosh, functional::Acosh); +UNARY_METHOD(PyTensorObject_tanh, functional::Tanh); +UNARY_METHOD(PyTensorObject_atanh, functional::Atanh); static PyObject* PyTensorObject_byte(PyObject* self, PyObject* unused) { HANDLE_ERRORS - // std::cout << "cpython byte" << std::endl; - return PyTensor_New(ASSERT_PTR(functional::To(PyTensor_Unpack(self), DType::Int8(), false))); + return PyTensor_New(ASSERT_PTR(functional::To(PyTensor_Unpack(self), DType::UInt8(), false))); END_HANDLE_ERRORS } static PyObject* PyTensorObject_dim(PyObject* self, PyObject* unused) { HANDLE_ERRORS - // std::cout << "cpython dim" << std::endl; return functional::CastToPyObject(PyTensor_Unpack(self)->ndim()); END_HANDLE_ERRORS } static PyObject* PyTensorObject_nelement(PyObject* self, PyObject* unused) { HANDLE_ERRORS - // std::cout << "cpython nelement" << std::endl; return functional::CastToPyObject(PyTensor_Unpack(self)->nelement()); END_HANDLE_ERRORS } static PyObject* PyTensorObject_element_size(PyObject* self, PyObject* unused) { HANDLE_ERRORS - // std::cout << "cpython element_size" << std::endl; return functional::CastToPyObject(PyTensor_Unpack(self)->dtype()->bytes()); END_HANDLE_ERRORS } static PyObject* PyTensorObject_get_device(PyObject* self, PyObject* unused) { HANDLE_ERRORS - // std::cout << "cpython get_device" << std::endl; auto device_type = ASSERT(PyTensor_Unpack(self)->device())->enum_type(); CHECK_OR_THROW(device_type == DeviceType::kCUDA) << "get_device is only available for GPU tensor."; @@ -220,56 +245,6 @@ static PyObject* PyTensorObject_get_device(PyObject* self, PyObject* unused) { END_HANDLE_ERRORS } -#define UNARY_METHOD(func_name, bind_func, name) \ - static PyObject* func_name(PyObject* self, PyObject* unused) { \ - HANDLE_ERRORS \ - return PyTensor_New(ASSERT_PTR(bind_func(PyTensor_Unpack(self)))); \ - END_HANDLE_ERRORS \ - } - -UNARY_METHOD(PyTensorObject_abs, functional::Abs, "abs"); -UNARY_METHOD(PyTensorObject_exp, functional::Exp, "exp"); -UNARY_METHOD(PyTensorObject_floor, functional::Floor, "floor"); -UNARY_METHOD(PyTensorObject_floor_, functional::Floor_, "floor_"); -UNARY_METHOD(PyTensorObject_sign, functional::Sign, "sign"); -UNARY_METHOD(PyTensorObject_gelu, functional::Gelu, "gelu"); -UNARY_METHOD(PyTensorObject_mish, functional::Mish, "mish"); -UNARY_METHOD(PyTensorObject_negative, functional::Negative, "negatinve"); -UNARY_METHOD(PyTensorObject_sigmoid, functional::Sigmoid, "sigmoid"); -UNARY_METHOD(PyTensorObject_silu, functional::Silu, "silu"); -UNARY_METHOD(PyTensorObject_selu, functional::Selu, "selu"); -UNARY_METHOD(PyTensorObject_softsign, functional::SoftSign, "softsign"); -UNARY_METHOD(PyTensorObject_log1p, functional::Log1p, "log1p"); -UNARY_METHOD(PyTensorObject_log2, functional::Log2, "log2"); -UNARY_METHOD(PyTensorObject_reciprocal, functional::Reciprocal, "reciprocal"); -UNARY_METHOD(PyTensorObject_ceil, functional::Ceil, "ceil"); -UNARY_METHOD(PyTensorObject_erf, functional::Erf, "erf"); 
-UNARY_METHOD(PyTensorObject_erfc, functional::Erfc, "erfc"); -UNARY_METHOD(PyTensorObject_erfinv, functional::Erfinv, "erfinv"); -UNARY_METHOD(PyTensorObject_erfinv_, functional::ErfinvInplace, "erfinv_inplace"); -UNARY_METHOD(PyTensorObject_expm1, functional::Expm1, "expm1"); -UNARY_METHOD(PyTensorObject_log, functional::Log, "log"); -UNARY_METHOD(PyTensorObject_rsqrt, functional::Rsqrt, "rsqrt"); -UNARY_METHOD(PyTensorObject_sqrt, functional::Sqrt, "sqrt"); -UNARY_METHOD(PyTensorObject_square, functional::Square, "square"); -UNARY_METHOD(PyTensorObject_round, functional::Round, "round"); -UNARY_METHOD(PyTensorObject_t, functional::TransposeAllDimFunction, "t"); -UNARY_METHOD(PyTensorObject_isnan, functional::IsNan, "isnan"); -UNARY_METHOD(PyTensorObject_isinf, functional::IsInf, "isinf"); -UNARY_METHOD(PyTensorObject_sin, functional::Sin, "sin"); -UNARY_METHOD(PyTensorObject_sin_, functional::Sin_, "sin_"); -UNARY_METHOD(PyTensorObject_asin, functional::Asin, "asin"); -UNARY_METHOD(PyTensorObject_cos, functional::Cos, "cos"); -UNARY_METHOD(PyTensorObject_acos, functional::Acos, "acos"); -UNARY_METHOD(PyTensorObject_tan, functional::Tan, "Tan"); -UNARY_METHOD(PyTensorObject_atan, functional::Atan, "atan"); -UNARY_METHOD(PyTensorObject_sinh, functional::Sinh, "sinh"); -UNARY_METHOD(PyTensorObject_asinh, functional::Asinh, "asinh"); -UNARY_METHOD(PyTensorObject_cosh, functional::Cosh, "cosh"); -UNARY_METHOD(PyTensorObject_acosh, functional::Acosh, "acosh"); -UNARY_METHOD(PyTensorObject_tanh, functional::Tanh, "tanh"); -UNARY_METHOD(PyTensorObject_atanh, functional::Atanh, "atanh"); - PyMethodDef PyTensorObject_extra_methods[] = { {"byte", PyTensorObject_byte, METH_NOARGS, NULL}, {"dim", PyTensorObject_dim, METH_NOARGS, NULL}, @@ -327,7 +302,6 @@ PyMethodDef PyTensorObject_extra_methods[] = { {"sin_", PyTensorObject_sin_, METH_NOARGS, NULL}, {"isnan", PyTensorObject_isnan, METH_NOARGS, NULL}, {"isinf", PyTensorObject_isinf, METH_NOARGS, NULL}, - {"floor_divide", PyTensorObject_div, METH_O, NULL}, {"floor", PyTensorObject_floor, METH_NOARGS, NULL}, {"floor_", PyTensorObject_floor_, METH_NOARGS, NULL}, @@ -335,9 +309,7 @@ PyMethodDef PyTensorObject_extra_methods[] = { }; // tp_richcompare - PyObject* PyTensorObject_richcompare(PyObject* self, PyObject* other, int op) { - std::cout << "cpython compare" << std::endl; PyObject* tuple = PyTuple_Pack(2, self, other); switch (op) { @@ -354,8 +326,6 @@ PyObject* PyTensorObject_richcompare(PyObject* self, PyObject* other, int op) { return NULL; } -// normal methods - } // namespace one } // namespace oneflow diff --git a/python/oneflow/framework/tensor.py b/python/oneflow/framework/tensor.py index bd869c85c35..b61eefd0b77 100755 --- a/python/oneflow/framework/tensor.py +++ b/python/oneflow/framework/tensor.py @@ -1163,52 +1163,52 @@ def inplace_contiguous_(self): def RegisterMethods(): - Tensor.__mul__ = lambda self, other: self.mul(other) - Tensor.__rmul__ = lambda self, other: self.mul(other) - Tensor.__add__ = lambda self, other: self.add(other) + # Tensor.__mul__ = lambda self, other: self.mul(other) + # Tensor.__rmul__ = lambda self, other: self.mul(other) + # Tensor.__add__ = lambda self, other: self.add(other) Tensor.__iadd__ = lambda self, other: self.add_(other) - Tensor.__matmul__ = lambda self, other: self.matmul(other) - Tensor.byte = _byte + # Tensor.__matmul__ = lambda self, other: self.matmul(other) + # Tensor.byte = _byte Tensor.ndim = property(_ndim) Tensor.numpy = _numpy Tensor.size = _size - Tensor.dim = _ndim - Tensor.ndimension = 
_ndim - Tensor.nelement = _nelement - Tensor.numel = _numel - Tensor.element_size = _element_size + # Tensor.dim = _ndim + # Tensor.ndimension = _ndim + # Tensor.nelement = _nelement + # Tensor.numel = _numel + # Tensor.element_size = _element_size Tensor.backward = _backward Tensor.__setitem__ = _setitem Tensor.__str__ = _str Tensor.__repr__ = _repr - Tensor.__eq__ = _eq - Tensor.__ne__ = _ne + # Tensor.__eq__ = _eq + # Tensor.__ne__ = _ne Tensor.__bool__ = is_nonzero - Tensor.__gt__ = _gt - Tensor.__lt__ = _lt - Tensor.__ge__ = _ge - Tensor.__le__ = _le - Tensor.__and__ = _and - Tensor.__or__ = _or - Tensor.__xor__ = _xor - Tensor.__mul__ = _mul - Tensor.__rmul__ = _rmul - Tensor.__add__ = _add + # Tensor.__gt__ = _gt + # Tensor.__lt__ = _lt + # Tensor.__ge__ = _ge + # Tensor.__le__ = _le + # Tensor.__and__ = _and + # Tensor.__or__ = _or + # Tensor.__xor__ = _xor + # Tensor.__mul__ = _mul + # Tensor.__rmul__ = _rmul + # Tensor.__add__ = _add Tensor.__iadd__ = _iadd - Tensor.__radd__ = _radd + # Tensor.__radd__ = _radd Tensor.addmm = _addmm - Tensor.__sub__ = _sub - Tensor.__rsub__ = _rsub - Tensor.__truediv__ = _truediv - Tensor.__rtruediv__ = _rtruediv - Tensor.__neg__ = _neg - Tensor.__pow__ = _pow - Tensor.__rpow__ = _rpow + # Tensor.__sub__ = _sub + # Tensor.__rsub__ = _rsub + # Tensor.__truediv__ = _truediv + # Tensor.__rtruediv__ = _rtruediv + # Tensor.__neg__ = _neg + # Tensor.__pow__ = _pow + # Tensor.__rpow__ = _rpow Tensor.__format__ = _format - Tensor.__floordiv__ = _floor_divide - Tensor.__mod__ = _fmod + # Tensor.__floordiv__ = _floor_divide + # Tensor.__mod__ = _fmod Tensor.__index__ = _index - Tensor.__invert__ = _invert + # Tensor.__invert__ = _invert Tensor.__float__ = _scalar_float Tensor.__int__ = _scalar_int Tensor.__array__ = _numpy @@ -1222,44 +1222,44 @@ def RegisterMethods(): Tensor.normal_ = _normal Tensor.fill_ = _fill Tensor.copy_ = _copy - Tensor.get_device = _get_device + # Tensor.get_device = _get_device Tensor._meta_repr = _meta_repr - Tensor.abs = _abs - Tensor.exp = _exp + # Tensor.abs = _abs + # Tensor.exp = _exp Tensor.floor_divide = _floor_divide - Tensor.floor = _floor - Tensor.floor_ = _floor_inplace_ + # Tensor.floor = _floor + # Tensor.floor_ = _floor_inplace_ Tensor.argmax = _argmax Tensor.argmin = _argmin Tensor.argsort = _argsort Tensor.argwhere = _argwhere - Tensor.acos = _acos - Tensor.arccos = _arccos - Tensor.acosh = _acosh + # Tensor.acos = _acos + # Tensor.arccos = _arccos + # Tensor.acosh = _acosh Tensor.amin = _amin - Tensor.arccosh = _arccosh - Tensor.atanh = _atanh + # Tensor.arccosh = _arccosh + # Tensor.atanh = _atanh Tensor.atan2 = _atan2 - Tensor.arctanh = _arctanh - Tensor.sign = _sign - Tensor.sinh = _sinh - Tensor.tan = _tan + # Tensor.arctanh = _arctanh + # Tensor.sign = _sign + # Tensor.sinh = _sinh + # Tensor.tan = _tan Tensor.gt = _gt Tensor.ge = _ge - Tensor.gelu = _gelu - Tensor.mish = _mish - Tensor.negative = _negative - Tensor.neg = _neg - Tensor.sigmoid = _sigmoid - Tensor.tanh = _tanh - Tensor.silu = _silu - Tensor.selu = _selu - Tensor.softsign = _softsign + # Tensor.gelu = _gelu + # Tensor.mish = _mish + # Tensor.negative = _negative + # Tensor.neg = _neg + # Tensor.sigmoid = _sigmoid + # Tensor.tanh = _tanh + # Tensor.silu = _silu + # Tensor.selu = _selu + # Tensor.softsign = _softsign Tensor.cast = _cast Tensor.diag = _diag Tensor.diagonal = _diagonal - Tensor.log1p = _log1p - Tensor.log2 = _log2 + # Tensor.log1p = _log1p + # Tensor.log2 = _log2 Tensor.add = _add Tensor.add_ = _add_inplace Tensor.addcmul = 
_addcmul @@ -1268,50 +1268,50 @@ def RegisterMethods(): Tensor.div_ = _truediv_inplace Tensor.mul = _mul Tensor.mul_ = _mul_ - Tensor.reciprocal = _reciprocal + # Tensor.reciprocal = _reciprocal Tensor.sub = _sub Tensor.sub_ = _sub_inplace - Tensor.asin = _asin - Tensor.arcsin = _arcsin - Tensor.asinh = _asinh - Tensor.arcsinh = _arcsinh - Tensor.atan = _atan - Tensor.arctan = _arctan - Tensor.ceil = _ceil + # Tensor.asin = _asin + # Tensor.arcsin = _arcsin + # Tensor.asinh = _asinh + # Tensor.arcsinh = _arcsinh + # Tensor.atan = _atan + # Tensor.arctan = _arctan + # Tensor.ceil = _ceil Tensor.clamp = _clamp Tensor.clamp_ = _clamp_ Tensor.clip = _clip Tensor.clip_ = _clip_ - Tensor.cos = _cos - Tensor.cosh = _cosh + # Tensor.cos = _cos + # Tensor.cosh = _cosh Tensor.cpu = _cpu Tensor.cuda = _cuda Tensor.expand = _expand Tensor.expand_as = _expand_as - Tensor.erf = _erf - Tensor.erfc = _erfc - Tensor.erfinv = _erfinv - Tensor.erfinv_ = _erfinv_inplace - Tensor.expm1 = _expm1 + # Tensor.erf = _erf + # Tensor.erfc = _erfc + # Tensor.erfinv = _erfinv + # Tensor.erfinv_ = _erfinv_inplace + # Tensor.expm1 = _expm1 Tensor.fmod = _fmod Tensor.flatten = _flatten Tensor.flip = _flip Tensor.in_top_k = _in_top_k Tensor.index_select = _index_select - Tensor.log = _log + # Tensor.log = _log Tensor.minimum = _minimum Tensor.maximum = _maximum Tensor.new_empty = _new_empty Tensor.new_ones = _new_ones Tensor.new_zeros = _new_zeros Tensor.pow = _pow - Tensor.rsqrt = _rsqrt - Tensor.sqrt = _sqrt - Tensor.square = _square + # Tensor.rsqrt = _rsqrt + # Tensor.sqrt = _sqrt + # Tensor.square = _square Tensor.var = _var Tensor.std = _std Tensor.matmul = _matmul - Tensor.round = _round + # Tensor.round = _round Tensor.softplus = _softplus Tensor.tril = _tril Tensor.triu = _triu @@ -1350,7 +1350,7 @@ def RegisterMethods(): Tensor.all = _all Tensor.any = _any Tensor.T = property(_T) - Tensor.t = _t + # Tensor.t = _t Tensor.masked_fill = _masked_fill Tensor.masked_select = _masked_select Tensor.eq = _eq @@ -1380,17 +1380,18 @@ def RegisterMethods(): Tensor.sum = _sum Tensor.mean = _mean Tensor.prod = _prod - Tensor.sin = _sin - Tensor.sin_ = _sin_inplace + # Tensor.sin = _sin + # Tensor.sin_ = _sin_inplace Tensor.is_consistent = _is_consistent Tensor.to_consistent = _to_consistent - Tensor.isnan = _isnan - Tensor.isinf = _isinf + # Tensor.isnan = _isnan + # Tensor.isinf = _isinf Tensor.new_tensor = _new_tensor Tensor.cumsum = _cumsum Tensor.cumprod = _cumprod Tensor.contiguous_ = inplace_contiguous_ + def register_tensor_op(op_name): def set_tensor_op(method): setattr(Tensor, op_name, method) diff --git a/python/oneflow/nn/modules/math_ops.py b/python/oneflow/nn/modules/math_ops.py index 6a418d67133..cfc5f1b1431 100644 --- a/python/oneflow/nn/modules/math_ops.py +++ b/python/oneflow/nn/modules/math_ops.py @@ -110,7 +110,7 @@ def arcsinh_op(input): return flow._C.asinh(input) -@register_tensor_op("asinh") +# @register_tensor_op("asinh") def asinh_op_tensor(input): """ @@ -119,7 +119,7 @@ def asinh_op_tensor(input): return flow._C.asinh(input) -@register_tensor_op("sin_") +# @register_tensor_op("sin_") def inplace_sin_op_tensor(input): """ In-place version of :func:`oneflow.sin` From b26c8ceb8be420286f649ae8ce8e376b40f1e48a Mon Sep 17 00:00:00 2001 From: WangYi Date: Wed, 25 May 2022 11:31:58 +0800 Subject: [PATCH 05/50] refine code, remove commented tensor.py --- oneflow/api/python/framework/tensor.cpp | 2 - python/oneflow/framework/tensor.py | 83 ------------------------- python/oneflow/nn/modules/math_ops.py | 2 - 3 
files changed, 87 deletions(-) diff --git a/oneflow/api/python/framework/tensor.cpp b/oneflow/api/python/framework/tensor.cpp index bbd149f5ca0..79547e45e8d 100644 --- a/oneflow/api/python/framework/tensor.cpp +++ b/oneflow/api/python/framework/tensor.cpp @@ -15,10 +15,8 @@ limitations under the License. */ #include "oneflow/api/python/framework/tensor.h" -#include #include #include -#include #include "oneflow/api/python/exception/exception.h" #include "oneflow/api/python/framework/size.h" #include "oneflow/api/python/functional/common.h" diff --git a/python/oneflow/framework/tensor.py b/python/oneflow/framework/tensor.py index b61eefd0b77..a03b3b2d192 100755 --- a/python/oneflow/framework/tensor.py +++ b/python/oneflow/framework/tensor.py @@ -1163,52 +1163,19 @@ def inplace_contiguous_(self): def RegisterMethods(): - # Tensor.__mul__ = lambda self, other: self.mul(other) - # Tensor.__rmul__ = lambda self, other: self.mul(other) - # Tensor.__add__ = lambda self, other: self.add(other) Tensor.__iadd__ = lambda self, other: self.add_(other) - # Tensor.__matmul__ = lambda self, other: self.matmul(other) - # Tensor.byte = _byte Tensor.ndim = property(_ndim) Tensor.numpy = _numpy Tensor.size = _size - # Tensor.dim = _ndim - # Tensor.ndimension = _ndim - # Tensor.nelement = _nelement - # Tensor.numel = _numel - # Tensor.element_size = _element_size Tensor.backward = _backward Tensor.__setitem__ = _setitem Tensor.__str__ = _str Tensor.__repr__ = _repr - # Tensor.__eq__ = _eq - # Tensor.__ne__ = _ne Tensor.__bool__ = is_nonzero - # Tensor.__gt__ = _gt - # Tensor.__lt__ = _lt - # Tensor.__ge__ = _ge - # Tensor.__le__ = _le - # Tensor.__and__ = _and - # Tensor.__or__ = _or - # Tensor.__xor__ = _xor - # Tensor.__mul__ = _mul - # Tensor.__rmul__ = _rmul - # Tensor.__add__ = _add Tensor.__iadd__ = _iadd - # Tensor.__radd__ = _radd Tensor.addmm = _addmm - # Tensor.__sub__ = _sub - # Tensor.__rsub__ = _rsub - # Tensor.__truediv__ = _truediv - # Tensor.__rtruediv__ = _rtruediv - # Tensor.__neg__ = _neg - # Tensor.__pow__ = _pow - # Tensor.__rpow__ = _rpow Tensor.__format__ = _format - # Tensor.__floordiv__ = _floor_divide - # Tensor.__mod__ = _fmod Tensor.__index__ = _index - # Tensor.__invert__ = _invert Tensor.__float__ = _scalar_float Tensor.__int__ = _scalar_int Tensor.__array__ = _numpy @@ -1222,44 +1189,19 @@ def RegisterMethods(): Tensor.normal_ = _normal Tensor.fill_ = _fill Tensor.copy_ = _copy - # Tensor.get_device = _get_device Tensor._meta_repr = _meta_repr - # Tensor.abs = _abs - # Tensor.exp = _exp Tensor.floor_divide = _floor_divide - # Tensor.floor = _floor - # Tensor.floor_ = _floor_inplace_ Tensor.argmax = _argmax Tensor.argmin = _argmin Tensor.argsort = _argsort Tensor.argwhere = _argwhere - # Tensor.acos = _acos - # Tensor.arccos = _arccos - # Tensor.acosh = _acosh Tensor.amin = _amin - # Tensor.arccosh = _arccosh - # Tensor.atanh = _atanh Tensor.atan2 = _atan2 - # Tensor.arctanh = _arctanh - # Tensor.sign = _sign - # Tensor.sinh = _sinh - # Tensor.tan = _tan Tensor.gt = _gt Tensor.ge = _ge - # Tensor.gelu = _gelu - # Tensor.mish = _mish - # Tensor.negative = _negative - # Tensor.neg = _neg - # Tensor.sigmoid = _sigmoid - # Tensor.tanh = _tanh - # Tensor.silu = _silu - # Tensor.selu = _selu - # Tensor.softsign = _softsign Tensor.cast = _cast Tensor.diag = _diag Tensor.diagonal = _diagonal - # Tensor.log1p = _log1p - # Tensor.log2 = _log2 Tensor.add = _add Tensor.add_ = _add_inplace Tensor.addcmul = _addcmul @@ -1268,50 +1210,30 @@ def RegisterMethods(): Tensor.div_ = _truediv_inplace 
Tensor.mul = _mul Tensor.mul_ = _mul_ - # Tensor.reciprocal = _reciprocal Tensor.sub = _sub Tensor.sub_ = _sub_inplace - # Tensor.asin = _asin - # Tensor.arcsin = _arcsin - # Tensor.asinh = _asinh - # Tensor.arcsinh = _arcsinh - # Tensor.atan = _atan - # Tensor.arctan = _arctan - # Tensor.ceil = _ceil Tensor.clamp = _clamp Tensor.clamp_ = _clamp_ Tensor.clip = _clip Tensor.clip_ = _clip_ - # Tensor.cos = _cos - # Tensor.cosh = _cosh Tensor.cpu = _cpu Tensor.cuda = _cuda Tensor.expand = _expand Tensor.expand_as = _expand_as - # Tensor.erf = _erf - # Tensor.erfc = _erfc - # Tensor.erfinv = _erfinv - # Tensor.erfinv_ = _erfinv_inplace - # Tensor.expm1 = _expm1 Tensor.fmod = _fmod Tensor.flatten = _flatten Tensor.flip = _flip Tensor.in_top_k = _in_top_k Tensor.index_select = _index_select - # Tensor.log = _log Tensor.minimum = _minimum Tensor.maximum = _maximum Tensor.new_empty = _new_empty Tensor.new_ones = _new_ones Tensor.new_zeros = _new_zeros Tensor.pow = _pow - # Tensor.rsqrt = _rsqrt - # Tensor.sqrt = _sqrt - # Tensor.square = _square Tensor.var = _var Tensor.std = _std Tensor.matmul = _matmul - # Tensor.round = _round Tensor.softplus = _softplus Tensor.tril = _tril Tensor.triu = _triu @@ -1350,7 +1272,6 @@ def RegisterMethods(): Tensor.all = _all Tensor.any = _any Tensor.T = property(_T) - # Tensor.t = _t Tensor.masked_fill = _masked_fill Tensor.masked_select = _masked_select Tensor.eq = _eq @@ -1380,12 +1301,8 @@ def RegisterMethods(): Tensor.sum = _sum Tensor.mean = _mean Tensor.prod = _prod - # Tensor.sin = _sin - # Tensor.sin_ = _sin_inplace Tensor.is_consistent = _is_consistent Tensor.to_consistent = _to_consistent - # Tensor.isnan = _isnan - # Tensor.isinf = _isinf Tensor.new_tensor = _new_tensor Tensor.cumsum = _cumsum Tensor.cumprod = _cumprod diff --git a/python/oneflow/nn/modules/math_ops.py b/python/oneflow/nn/modules/math_ops.py index cfc5f1b1431..adbf43a2002 100644 --- a/python/oneflow/nn/modules/math_ops.py +++ b/python/oneflow/nn/modules/math_ops.py @@ -110,7 +110,6 @@ def arcsinh_op(input): return flow._C.asinh(input) -# @register_tensor_op("asinh") def asinh_op_tensor(input): """ @@ -119,7 +118,6 @@ def asinh_op_tensor(input): return flow._C.asinh(input) -# @register_tensor_op("sin_") def inplace_sin_op_tensor(input): """ In-place version of :func:`oneflow.sin` From 63a09ec2aa6f8afdaaf97cbcc8165baae85b8a01 Mon Sep 17 00:00:00 2001 From: WangYi Date: Wed, 25 May 2022 13:22:32 +0800 Subject: [PATCH 06/50] refine code --- oneflow/api/python/framework/tensor.cpp | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/oneflow/api/python/framework/tensor.cpp b/oneflow/api/python/framework/tensor.cpp index 79547e45e8d..cbb52cfa931 100644 --- a/oneflow/api/python/framework/tensor.cpp +++ b/oneflow/api/python/framework/tensor.cpp @@ -321,23 +321,22 @@ static PyObject* PyTensorObject__register_storage_delete_hook(PyObject* self, Py END_HANDLE_ERRORS } -PyMethodDef* PyTensorObject_methods = NULL; -PyMethodDef* concat_method_def(PyMethodDef original_methods[], PyMethodDef extra_methods[]) { +PyMethodDef* concat_method_def(PyMethodDef methods[], PyMethodDef extra_methods[]) { int len1 = 0; int len2 = 0; - PyMethodDef* p1 = original_methods; + PyMethodDef* p1 = methods; PyMethodDef* p2 = extra_methods; while ((p1++)->ml_name != NULL) { len1++; } while ((p2++)->ml_name != NULL) { len2++; } PyMethodDef* total_methods = new PyMethodDef[len1 + len2 + 1]; - for (int i = 0; i < len1; i++) total_methods[i] = original_methods[i]; + for (int i = 0; i < len1; i++) 
total_methods[i] = methods[i]; for (int i = 0; i < len2; i++) total_methods[i + len1] = extra_methods[i]; total_methods[len1 + len2] = {NULL}; return total_methods; } -static PyMethodDef PyTensorObject_original_methods[] = { +static PyMethodDef PyTensorObject_methods[] = { {"storage_offset", PyTensorObject_storage_offset, METH_NOARGS, NULL}, {"stride", PyTensorObject_stride, METH_NOARGS, NULL}, {"is_contiguous", PyTensorObject_is_contiguous, METH_NOARGS, NULL}, @@ -569,11 +568,9 @@ static PyTypeObject* MakeTensorType() { type->tp_init = PyTensorObject_init; type->tp_dealloc = PyTensorObject_dealloc; type->tp_getset = PyTensorObject_properties; - // type->tp_methods = PyTensorObject_original_methods; type->tp_methods = - concat_method_def(PyTensorObject_original_methods, PyTensorObject_extra_methods); + concat_method_def(PyTensorObject_methods, PyTensorObject_extra_methods); - // type->tp_as_number = &heap_type->as_number; type->tp_as_number = &PyTensorObject_as_number; type->tp_as_sequence = &PyTensorObject_as_sequence; type->tp_as_mapping = &PyTensorObject_as_mapping; From 05fff5cad9fbebdac9eab6d6460d13da8fca9bbc Mon Sep 17 00:00:00 2001 From: WangYi Date: Thu, 26 May 2022 16:11:46 +0800 Subject: [PATCH 07/50] move some api --- oneflow/api/python/framework/tensor.cpp | 4 +- .../api/python/framework/tensor_functions.cpp | 468 +++++++++++++++--- python/oneflow/framework/tensor.py | 66 +-- 3 files changed, 437 insertions(+), 101 deletions(-) diff --git a/oneflow/api/python/framework/tensor.cpp b/oneflow/api/python/framework/tensor.cpp index cbb52cfa931..2cae2ebccf8 100644 --- a/oneflow/api/python/framework/tensor.cpp +++ b/oneflow/api/python/framework/tensor.cpp @@ -321,7 +321,6 @@ static PyObject* PyTensorObject__register_storage_delete_hook(PyObject* self, Py END_HANDLE_ERRORS } - PyMethodDef* concat_method_def(PyMethodDef methods[], PyMethodDef extra_methods[]) { int len1 = 0; int len2 = 0; @@ -568,8 +567,7 @@ static PyTypeObject* MakeTensorType() { type->tp_init = PyTensorObject_init; type->tp_dealloc = PyTensorObject_dealloc; type->tp_getset = PyTensorObject_properties; - type->tp_methods = - concat_method_def(PyTensorObject_methods, PyTensorObject_extra_methods); + type->tp_methods = concat_method_def(PyTensorObject_methods, PyTensorObject_extra_methods); type->tp_as_number = &PyTensorObject_as_number; type->tp_as_sequence = &PyTensorObject_as_sequence; diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index 9c0e921843a..340cdedefe0 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -16,6 +16,7 @@ limitations under the License. #include #include "oneflow/api/python/exception/exception.h" +#include "oneflow/api/python/framework/size.h" #include "oneflow/api/python/functional/common.h" #include "oneflow/api/python/functional/functional_api.yaml.pybind.h" #include "oneflow/core/functional/functional.h" @@ -23,6 +24,8 @@ limitations under the License. 
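The concat_method_def helper above leans on the fact that PyMethodDef tables are {NULL}-terminated: count each table up to its sentinel, copy both into fresh storage, and re-terminate. Below is a minimal standalone sketch of the same idea; the method names and tables ("Hello", "base_methods", "extra_methods") are invented for illustration and are not OneFlow code. The merged array is deliberately never freed, because the method descriptors CPython creates from it keep raw pointers into the array for as long as the type lives.

#include <Python.h>

// Toy methods; names here are illustrative only.
static PyObject* Hello(PyObject* self, PyObject* unused) { Py_RETURN_NONE; }

static PyMethodDef base_methods[] = {{"hello", Hello, METH_NOARGS, NULL}, {NULL}};
static PyMethodDef extra_methods[] = {{"world", Hello, METH_NOARGS, NULL}, {NULL}};

static PyMethodDef* ConcatMethodDef(PyMethodDef a[], PyMethodDef b[]) {
  int len1 = 0, len2 = 0;
  while (a[len1].ml_name != NULL) { len1++; }  // count up to the {NULL} sentinel
  while (b[len2].ml_name != NULL) { len2++; }
  PyMethodDef* merged = new PyMethodDef[len1 + len2 + 1];
  for (int i = 0; i < len1; i++) { merged[i] = a[i]; }
  for (int i = 0; i < len2; i++) { merged[len1 + i] = b[i]; }
  merged[len1 + len2] = {NULL};  // restore the sentinel
  // Intentionally never freed: descriptors built from this table keep
  // raw pointers into it for the lifetime of the type.
  return merged;
}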
namespace oneflow { namespace one { +using functional::PyObjectPtr; + #define ASSERT(x) (x).GetOrThrow() #define ASSERT_PTR(x) (x).GetPtrOrThrow() @@ -46,23 +49,23 @@ namespace one { END_HANDLE_ERRORS \ } -NB_UNARY_FUNC(PyTensorObject_absolute, functional::abs); -NB_UNARY_FUNC(PyTensorObject_negative, functional::negative); - -NB_BINARY_FUNC(PyTensorObject_add, functional::add); -NB_BINARY_FUNC(PyTensorObject_sub, functional::sub); -NB_BINARY_FUNC(PyTensorObject_mul, functional::mul); -NB_BINARY_FUNC(PyTensorObject_fmod, functional::fmod); -NB_BINARY_FUNC(PyTensorObject_div, functional::div); -NB_BINARY_FUNC(PyTensorObject_and, functional::logical_and); -NB_BINARY_FUNC(PyTensorObject_xor, functional::logical_xor); -NB_BINARY_FUNC(PyTensorObject_or, functional::logical_or); -NB_BINARY_FUNC(PyTensorObject_floor_div, functional::floor_divide); -NB_BINARY_FUNC(PyTensorObject_true_div, functional::div); -NB_BINARY_FUNC(PyTensorObject_matrix_multiply, functional::matmul); +NB_UNARY_FUNC(PyTensorObject_nb_absolute, functional::abs); +NB_UNARY_FUNC(PyTensorObject_nb_negative, functional::negative); + +NB_BINARY_FUNC(PyTensorObject_nb_add, functional::add); +NB_BINARY_FUNC(PyTensorObject_nb_sub, functional::sub); +NB_BINARY_FUNC(PyTensorObject_nb_mul, functional::mul); +NB_BINARY_FUNC(PyTensorObject_nb_fmod, functional::fmod); +NB_BINARY_FUNC(PyTensorObject_nb_div, functional::div); +NB_BINARY_FUNC(PyTensorObject_nb_and, functional::logical_and); +NB_BINARY_FUNC(PyTensorObject_nb_xor, functional::logical_xor); +NB_BINARY_FUNC(PyTensorObject_nb_or, functional::logical_or); +NB_BINARY_FUNC(PyTensorObject_nb_floor_div, functional::floor_divide); +NB_BINARY_FUNC(PyTensorObject_nb_true_div, functional::div); +NB_BINARY_FUNC(PyTensorObject_nb_matrix_multiply, functional::matmul); // TODO: not implemented yet // NB_UNARY_FUNC(PyTensorObject_positive, functional::positive); -PyObject* PyTensorObject_pow(PyObject* a, PyObject* b, PyObject* unsed) { +PyObject* PyTensorObject_nb_pow(PyObject* a, PyObject* b, PyObject* unsed) { HANDLE_ERRORS PyObject* tuple = PyTuple_Pack(2, a, b); auto* result = functional::pow(NULL, tuple, NULL); @@ -71,7 +74,7 @@ PyObject* PyTensorObject_pow(PyObject* a, PyObject* b, PyObject* unsed) { END_HANDLE_ERRORS } -static PyObject* PyTensorObject_invert(PyObject* self) { +static PyObject* PyTensorObject_nb_invert(PyObject* self) { HANDLE_ERRORS CHECK_OR_THROW(PyTensor_Unpack(self)->dtype()->data_type() == DataType::kBool) << "~ (operator.invert) is only implemented on integer and Boolean-type tensors"; @@ -96,18 +99,18 @@ static PyObject* PyTensorObject_invert(PyObject* self) { // TODO: still have bug here // NB_INPLACE_BINARY_FUNC(PyTensorObject_inplace_add, functional::add, "add"); -NB_INPLACE_BINARY_FUNC(PyTensorObject_inplace_sub, functional::sub); -NB_INPLACE_BINARY_FUNC(PyTensorObject_inplace_mul, functional::mul); -NB_INPLACE_BINARY_FUNC(PyTensorObject_inplace_fmod, functional::fmod); -NB_INPLACE_BINARY_FUNC(PyTensorObject_inplace_and, functional::logical_and); -NB_INPLACE_BINARY_FUNC(PyTensorObject_inplace_xor, functional::logical_xor); -NB_INPLACE_BINARY_FUNC(PyTensorObject_inplace_or, functional::logical_or); -NB_INPLACE_BINARY_FUNC(PyTensorObject_inplace_floor_div, functional::floor_divide); -NB_INPLACE_BINARY_FUNC(PyTensorObject_inplace_true_div, functional::div); +NB_INPLACE_BINARY_FUNC(PyTensorObject_nb_inplace_sub, functional::sub); +NB_INPLACE_BINARY_FUNC(PyTensorObject_nb_inplace_mul, functional::mul); +NB_INPLACE_BINARY_FUNC(PyTensorObject_nb_inplace_fmod, 
functional::fmod); +NB_INPLACE_BINARY_FUNC(PyTensorObject_nb_inplace_and, functional::logical_and); +NB_INPLACE_BINARY_FUNC(PyTensorObject_nb_inplace_xor, functional::logical_xor); +NB_INPLACE_BINARY_FUNC(PyTensorObject_nb_inplace_or, functional::logical_or); +NB_INPLACE_BINARY_FUNC(PyTensorObject_nb_inplace_floor_div, functional::floor_divide); +NB_INPLACE_BINARY_FUNC(PyTensorObject_nb_inplace_true_div, functional::div); // TODO: inplace matmul not supported yet // INPLACE_BINARY_FUNC(PyTensorObject_inplace_matrix_multiply, functional::matmul, "matmul"); -PyObject* PyTensorObject_inplace_pow(PyObject* a, PyObject* b, PyObject* unsed) { +PyObject* PyTensorObject_nb_inplace_pow(PyObject* a, PyObject* b, PyObject* unsed) { HANDLE_ERRORS PyObject* tuple = PyTuple_Pack(2, a, b); PyObject* dict = PyDict_New(); @@ -119,45 +122,45 @@ PyObject* PyTensorObject_inplace_pow(PyObject* a, PyObject* b, PyObject* unsed) } PyNumberMethods PyTensorObject_as_number = { - PyTensorObject_add, // nb_add - PyTensorObject_sub, // nb_subtract - PyTensorObject_mul, // nb_multiply - PyTensorObject_fmod, // nb_remainder - NULL, // nb_divmod - PyTensorObject_pow, // nb_power - PyTensorObject_negative, // nb_negative - NULL, // nb_positive - PyTensorObject_absolute, // nb_absolute - NULL, // nb_bool - PyTensorObject_invert, // nb_invert - NULL, // nb_lshift - NULL, // nb_rshift - PyTensorObject_and, // nb_and - PyTensorObject_xor, // nb_xor - PyTensorObject_or, // nb_or - NULL, // nb_int - NULL, // nb_reserved - NULL, // nb_float - - NULL, // nb_inplace_add - PyTensorObject_inplace_sub, // nb_inplace_sub - PyTensorObject_inplace_mul, // nb_inplace_mul - PyTensorObject_inplace_fmod, // nb_inplace_remainder - PyTensorObject_inplace_pow, // nb_inplace_pow - NULL, // nb_inplace_lshift - NULL, // nb_inplace_rshift - PyTensorObject_inplace_and, // nb_inplace_and - PyTensorObject_inplace_xor, // nb_inplace_xor - PyTensorObject_inplace_or, // nb_inplace_or - - PyTensorObject_floor_div, // nb_floor_div - PyTensorObject_true_div, // nb_true_div - PyTensorObject_inplace_floor_div, // nb_inplace_floor_div - PyTensorObject_inplace_true_div, // nb_inplace_true_div - - NULL, // nb_index - PyTensorObject_matrix_multiply, // nb_matrix_multiply - NULL, // not implemented yet nb_inplace_matrix_multiply + PyTensorObject_nb_add, // nb_add + PyTensorObject_nb_sub, // nb_subtract + PyTensorObject_nb_mul, // nb_multiply + PyTensorObject_nb_fmod, // nb_remainder + NULL, // nb_divmod + PyTensorObject_nb_pow, // nb_power + PyTensorObject_nb_negative, // nb_negative + NULL, // nb_positive + PyTensorObject_nb_absolute, // nb_absolute + NULL, // nb_bool + PyTensorObject_nb_invert, // nb_invert + NULL, // nb_lshift + NULL, // nb_rshift + PyTensorObject_nb_and, // nb_and + PyTensorObject_nb_xor, // nb_xor + PyTensorObject_nb_or, // nb_or + NULL, // nb_int + NULL, // nb_reserved + NULL, // nb_float + + NULL, // nb_inplace_add + PyTensorObject_nb_inplace_sub, // nb_inplace_sub + PyTensorObject_nb_inplace_mul, // nb_inplace_mul + PyTensorObject_nb_inplace_fmod, // nb_inplace_remainder + PyTensorObject_nb_inplace_pow, // nb_inplace_pow + NULL, // nb_inplace_lshift + NULL, // nb_inplace_rshift + PyTensorObject_nb_inplace_and, // nb_inplace_and + PyTensorObject_nb_inplace_xor, // nb_inplace_xor + PyTensorObject_nb_inplace_or, // nb_inplace_or + + PyTensorObject_nb_floor_div, // nb_floor_div + PyTensorObject_nb_true_div, // nb_true_div + PyTensorObject_nb_inplace_floor_div, // nb_inplace_floor_div + PyTensorObject_nb_inplace_true_div, // 
nb_inplace_true_div + + NULL, // nb_index + PyTensorObject_nb_matrix_multiply, // nb_matrix_multiply + NULL, // not implemented yet nb_inplace_matrix_multiply }; @@ -165,6 +168,7 @@ PyNumberMethods PyTensorObject_as_number = { #define UNARY_METHOD(func_name, bind_func) \ static PyObject* func_name(PyObject* self, PyObject* unused) { \ HANDLE_ERRORS \ + std::cout << "cpython" << std::endl; \ return PyTensor_New(ASSERT_PTR(bind_func(PyTensor_Unpack(self)))); \ END_HANDLE_ERRORS \ } @@ -211,6 +215,43 @@ UNARY_METHOD(PyTensorObject_cosh, functional::Cosh); UNARY_METHOD(PyTensorObject_acosh, functional::Acosh); UNARY_METHOD(PyTensorObject_tanh, functional::Tanh); UNARY_METHOD(PyTensorObject_atanh, functional::Atanh); +UNARY_METHOD(PyTensorObject_logical_not, functional::LogicalNot); + +#define BINARY_METHOD(func_name, bind_func, name) \ + static PyObject* func_name(PyObject* self, PyObject* args, PyObject* kwargs) { \ + HANDLE_ERRORS \ + std::cout << "cpython" << std::endl; \ + PyObject* other = NULL; \ + static const char* keywords[2] = {"other", NULL}; \ + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O:" name, const_cast(keywords), \ + &other)) { \ + return NULL; \ + } \ + PyObjectPtr tuple(PyTuple_Pack(2, self, other)); \ + return bind_func(NULL, tuple.get(), NULL); \ + END_HANDLE_ERRORS \ + } + +BINARY_METHOD(PyTensorObject_floor_divide, functional::floor_divide, "floor_divide"); +BINARY_METHOD(PyTensorObject_atan2, functional::atan2, "atan2"); +BINARY_METHOD(PyTensorObject_gt, functional::greater, "gt"); +BINARY_METHOD(PyTensorObject_ge, functional::greater_equal, "ge"); +BINARY_METHOD(PyTensorObject_div, functional::div, "div"); +BINARY_METHOD(PyTensorObject_div_, functional::div_, "div_"); +BINARY_METHOD(PyTensorObject_mul, functional::mul, "mul"); +BINARY_METHOD(PyTensorObject_mul_, functional::mul_, "mul_"); +BINARY_METHOD(PyTensorObject_sub, functional::sub, "sub"); +// TODO: not implemented yet +// BINARY_METHOD(PyTensorObject_sub, functional::sub, "sub_"); +BINARY_METHOD(PyTensorObject_fmod, functional::fmod, "fmod"); +BINARY_METHOD(PyTensorObject_matmul, functional::matmul, "matmul"); +BINARY_METHOD(PyTensorObject_logical_and, functional::logical_and, "logical_and"); +BINARY_METHOD(PyTensorObject_logical_or, functional::logical_or, "logical_or"); +BINARY_METHOD(PyTensorObject_logical_xor, functional::logical_xor, "logical_xor"); +BINARY_METHOD(PyTensorObject_bmm, functional::batch_matmul, "bmm"); +BINARY_METHOD(PyTensorObject_ne, functional::not_equal, "ne"); +BINARY_METHOD(PyTensorObject_lt, functional::less, "lt"); +BINARY_METHOD(PyTensorObject_le, functional::less_equal, "le"); static PyObject* PyTensorObject_byte(PyObject* self, PyObject* unused) { HANDLE_ERRORS @@ -245,14 +286,311 @@ static PyObject* PyTensorObject_get_device(PyObject* self, PyObject* unused) { END_HANDLE_ERRORS } +static PyObject* PyTensorObject_size(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_ERRORS + std::cout << "cpython size" << std::endl; + PyObject* idx = NULL; + static const char* keywords[2] = {"idx", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:size", const_cast(keywords), &idx)) { + return NULL; + } + Shape shape = *PyTensor_Unpack(self)->shape(); + PyObject* shape_object = TensorSize_NewFromShape(shape); + if (idx == NULL || idx == Py_None) return shape_object; + return shape_object->ob_type->tp_as_mapping->mp_subscript(shape_object, idx); + END_HANDLE_ERRORS +} + +static PyObject* PyTensorObject_argmax(PyObject* self, PyObject* args, PyObject* kwargs) { + 
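+  // argmax and the kwargs-style methods that follow share one pattern:
+  // parse the Python-level arguments with PyArg_ParseTupleAndKeywords,
+  // then repack them into a positional tuple (plus a kwargs dict where
+  // needed) for the generated functional::* binding, which does its own
+  // type checking and dispatch.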
HANDLE_ERRORS
+  std::cout << "cpython argmax" << std::endl;
+  PyObject* dim = Py_None;
+  PyObject* keepdim = Py_None;
+  static const char* keywords[3] = {"dim", "keepdim", NULL};
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO:argmax", const_cast<char**>(keywords), &dim,
+                                   &keepdim)) {
+    return NULL;
+  }
+  PyObjectPtr tuple(PyTuple_Pack(1, self));
+  PyObjectPtr dict(PyDict_New());
+  CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "dim", dim) > -1);
+  CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "keepdim", keepdim) > -1);
+  return functional::argmax(NULL, tuple.get(), dict.get());
+  END_HANDLE_ERRORS
+}
+
+static PyObject* PyTensorObject_argmin(PyObject* self, PyObject* args, PyObject* kwargs) {
+  HANDLE_ERRORS
+  std::cout << "cpython argmin" << std::endl;
+  PyObject* dim = Py_None;
+  PyObject* keepdim = Py_None;
+  static const char* keywords[3] = {"dim", "keepdim", NULL};
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO:argmin", const_cast<char**>(keywords), &dim,
+                                   &keepdim)) {
+    return NULL;
+  }
+  PyObjectPtr tuple(PyTuple_Pack(1, self));
+  PyObjectPtr dict(PyDict_New());
+  CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "dim", dim) > -1);
+  CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "keepdim", keepdim) > -1);
+  return functional::argmin(NULL, tuple.get(), dict.get());
+  END_HANDLE_ERRORS
+}
+
+static PyObject* PyTensorObject_amin(PyObject* self, PyObject* args, PyObject* kwargs) {
+  HANDLE_ERRORS
+  std::cout << "cpython amin" << std::endl;
+  PyObject* dim = Py_None;
+  PyObject* keepdim = Py_None;
+  static const char* keywords[3] = {"dim", "keepdim", NULL};
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO:amin", const_cast<char**>(keywords), &dim,
+                                   &keepdim)) {
+    return NULL;
+  }
+  PyObjectPtr tuple(PyTuple_Pack(1, self));
+  PyObjectPtr dict(PyDict_New());
+  CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "dim", dim) > -1);
+  CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "keepdim", keepdim) > -1);
+  return functional::amin(NULL, tuple.get(), dict.get());
+  END_HANDLE_ERRORS
+}
+
+static PyObject* PyTensorObject_cast(PyObject* self, PyObject* args, PyObject* kwargs) {
+  HANDLE_ERRORS
+  std::cout << "cpython" << std::endl;
+  PyObject* dtype = NULL;
+  static const char* keywords[2] = {"dtype", NULL};
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O:cast", const_cast<char**>(keywords), &dtype)) {
+    return NULL;
+  }
+  PyObjectPtr tuple(PyTuple_Pack(2, self, dtype));
+  return functional::cast(NULL, tuple.get(), NULL);
+  END_HANDLE_ERRORS
+}
+
+static PyObject* PyTensorObject_diag(PyObject* self, PyObject* args, PyObject* kwargs) {
+  HANDLE_ERRORS
+  std::cout << "cpython" << std::endl;
+  PyObject* diagonal = PyLong_FromLong(0);
+  static const char* keywords[2] = {"diagonal", NULL};
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:diag", const_cast<char**>(keywords),
+                                   &diagonal)) {
+    return NULL;
+  }
+  PyObjectPtr tuple(PyTuple_Pack(2, self, diagonal));
+  return functional::diag(NULL, tuple.get(), NULL);
+  END_HANDLE_ERRORS
+}
+
+static PyObject* PyTensorObject_diagonal(PyObject* self, PyObject* args, PyObject* kwargs) {
+  HANDLE_ERRORS
+  std::cout << "cpython" << std::endl;
+  PyObject* offset = PyLong_FromLong(0);
+  PyObject* dim1 = PyLong_FromLong(0);
+  PyObject* dim2 = PyLong_FromLong(1);
+  static const char* keywords[4] = {"offset", "dim1", "dim2", NULL};
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOO:diagonal", const_cast<char**>(keywords), &offset,
+                                   &dim1, &dim2)) {
+    return NULL;
+  }
+  PyObjectPtr tuple(PyTuple_Pack(4, self, offset, dim1, dim2));
+  return functional::diagonal(NULL, tuple.get(),
NULL); + END_HANDLE_ERRORS +} + +// static PyObject* PyTensorObject_add(PyObject* self, PyObject* args, PyObject* kwargs) { +// HANDLE_ERRORS +// std::cout << "cpython" << std::endl; +// PyObject* other = NULL; +// PyObject* alpha = PyFloat_FromDouble(1.0); +// static const char* keywords[3] = {"other", "alpha", NULL}; +// if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|$O:add", const_cast(keywords), +// &other, &alpha)) { +// return NULL; +// } +// std::cout << "run1" << std::endl; +// if(other == NULL) +// std::cout << "other null" << std::endl; +// std::cout << "run2" << std::endl; +// PyObjectPtr tuple(PyTuple_Pack(3, self, other, alpha));//, other, alpha)); +// // PyObjectPtr dict(PyDict_New()); +// PyObject* dict = PyDict_New(); +// CHECK_OR_THROW(PyDict_SetItemString(dict, "alpha", alpha) > -1); +// CHECK_OR_THROW(PyDict_SetItemString(dict, "inplace", Py_False) > -1); +// std::cout << "run3" << std::endl; +// return functional::add(NULL, tuple.get(), dict); +// END_HANDLE_ERRORS +// } + +static PyObject* PyTensorObject_addcmul(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_ERRORS + std::cout << "cpython" << std::endl; + PyObject* tensor1 = NULL; + PyObject* tensor2 = NULL; + PyObject* value = PyFloat_FromDouble(1.0); + static const char* keywords[4] = {"tensor1", "tensor2", "value", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|$O:addcmul", const_cast(keywords), + &tensor1, &tensor2, &value)) { + return NULL; + } + PyObjectPtr tuple(PyTuple_Pack(3, self, tensor1, tensor2)); + PyObjectPtr dict(PyDict_New()); + CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "value", value) > -1); + return functional::addcmul(NULL, tuple.get(), dict.get()); + END_HANDLE_ERRORS +} + +static PyObject* PyTensorObject_addcmul_(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_ERRORS + std::cout << "cpython" << std::endl; + PyObject* tensor1 = NULL; + PyObject* tensor2 = NULL; + PyObject* value = PyFloat_FromDouble(1.0); + static const char* keywords[4] = {"tensor1", "tensor2", "value", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|$O:addcmul_", const_cast(keywords), + &tensor1, &tensor2, &value)) { + return NULL; + } + PyObjectPtr tuple(PyTuple_Pack(3, self, tensor1, tensor2)); + PyObjectPtr dict(PyDict_New()); + CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "value", value) > -1); + return functional::addcmul_(NULL, tuple.get(), dict.get()); + END_HANDLE_ERRORS +} + +static PyObject* PyTensorObject_sub_(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_ERRORS + std::cout << "cpython" << std::endl; + PyObject* other = NULL; + static const char* keywords[2] = {"other", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O:sub_", const_cast(keywords), &other)) { + return NULL; + } + PyObjectPtr tuple(PyTuple_Pack(2, self, other)); + PyObjectPtr dict(PyDict_New()); + CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "inplace", Py_True) > -1); + return functional::sub(NULL, tuple.get(), dict.get()); + END_HANDLE_ERRORS +} + +static PyObject* PyTensorObject_clamp(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_ERRORS + std::cout << "cpython" << std::endl; + PyObject* min = Py_None; + PyObject* max = Py_None; + static const char* keywords[3] = {"min", "max", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO:clamp", const_cast(keywords), &min, + &max)) { + return NULL; + } + PyObjectPtr tuple(PyTuple_Pack(1, self)); + PyObjectPtr dict(PyDict_New()); + CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "min", min) > -1); + 
CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "max", max) > -1);
+  return functional::clamp(NULL, tuple.get(), dict.get());
+  END_HANDLE_ERRORS
+}
+
+static PyObject* PyTensorObject_clamp_(PyObject* self, PyObject* args, PyObject* kwargs) {
+  HANDLE_ERRORS
+  std::cout << "cpython" << std::endl;
+  PyObject* min = Py_None;
+  PyObject* max = Py_None;
+  static const char* keywords[3] = {"min", "max", NULL};
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO:clamp_", const_cast<char**>(keywords), &min,
+                                   &max)) {
+    return NULL;
+  }
+  PyObjectPtr tuple(PyTuple_Pack(1, self));
+  PyObjectPtr dict(PyDict_New());
+  CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "min", min) > -1);
+  CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "max", max) > -1);
+  return functional::clamp_(NULL, tuple.get(), dict.get());
+  END_HANDLE_ERRORS
+}
+
+static PyObject* PyTensorObject_clip(PyObject* self, PyObject* args, PyObject* kwargs) {
+  HANDLE_ERRORS
+  std::cout << "cpython" << std::endl;
+  PyObject* min = Py_None;
+  PyObject* max = Py_None;
+  static const char* keywords[3] = {"min", "max", NULL};
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO:clip", const_cast<char**>(keywords), &min,
+                                   &max)) {
+    return NULL;
+  }
+  PyObjectPtr tuple(PyTuple_Pack(1, self));
+  PyObjectPtr dict(PyDict_New());
+  CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "min", min) > -1);
+  CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "max", max) > -1);
+  return functional::clip(NULL, tuple.get(), dict.get());
+  END_HANDLE_ERRORS
+}
+
+static PyObject* PyTensorObject_clip_(PyObject* self, PyObject* args, PyObject* kwargs) {
+  HANDLE_ERRORS
+  std::cout << "cpython" << std::endl;
+  PyObject* min = Py_None;
+  PyObject* max = Py_None;
+  static const char* keywords[3] = {"min", "max", NULL};
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO:clip_", const_cast<char**>(keywords), &min,
+                                   &max)) {
+    return NULL;
+  }
+  PyObjectPtr tuple(PyTuple_Pack(1, self));
+  PyObjectPtr dict(PyDict_New());
+  CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "min", min) > -1);
+  CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "max", max) > -1);
+  return functional::clip_(NULL, tuple.get(), dict.get());
+  END_HANDLE_ERRORS
+}
+
 PyMethodDef PyTensorObject_extra_methods[] = {
     {"byte", PyTensorObject_byte, METH_NOARGS, NULL},
+    {"size", (PyCFunction)PyTensorObject_size, METH_VARARGS | METH_KEYWORDS, NULL},
+    {"argmax", (PyCFunction)PyTensorObject_argmax, METH_VARARGS | METH_KEYWORDS, NULL},
+    {"argmin", (PyCFunction)PyTensorObject_argmin, METH_VARARGS | METH_KEYWORDS, NULL},
+    {"amin", (PyCFunction)PyTensorObject_amin, METH_VARARGS | METH_KEYWORDS, NULL},
     {"dim", PyTensorObject_dim, METH_NOARGS, NULL},
     {"ndimension", PyTensorObject_dim, METH_NOARGS, NULL},
     {"nelement", PyTensorObject_nelement, METH_NOARGS, NULL},
     {"numel", PyTensorObject_nelement, METH_NOARGS, NULL},
     {"element_size", PyTensorObject_element_size, METH_NOARGS, NULL},
     {"get_device", PyTensorObject_get_device, METH_NOARGS, NULL},
+    {"cast", (PyCFunction)PyTensorObject_cast, METH_VARARGS | METH_KEYWORDS, NULL},
+    {"diag", (PyCFunction)PyTensorObject_diag, METH_VARARGS | METH_KEYWORDS, NULL},
+    {"diagonal", (PyCFunction)PyTensorObject_diagonal, METH_VARARGS | METH_KEYWORDS, NULL},
+    // {"add", (PyCFunction)PyTensorObject_add, METH_VARARGS | METH_KEYWORDS, NULL},
+    {"addcmul", (PyCFunction)PyTensorObject_addcmul, METH_VARARGS | METH_KEYWORDS, NULL},
+    {"addcmul_", (PyCFunction)PyTensorObject_addcmul_, METH_VARARGS | METH_KEYWORDS, NULL},
+    {"sub_", (PyCFunction)PyTensorObject_sub_, METH_VARARGS | METH_KEYWORDS, NULL},
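+    // The clamp/clamp_/clip/clip_ entries below share the same optional {min, max} parser.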
+ {"clamp", (PyCFunction)PyTensorObject_clamp, METH_VARARGS | METH_KEYWORDS, NULL}, + {"clamp_", (PyCFunction)PyTensorObject_clamp_, METH_VARARGS | METH_KEYWORDS, NULL}, + {"clip", (PyCFunction)PyTensorObject_clip, METH_VARARGS | METH_KEYWORDS, NULL}, + {"clip_", (PyCFunction)PyTensorObject_clip_, METH_VARARGS | METH_KEYWORDS, NULL}, + + // macro BINARY_METHOD + {"floor_divide", (PyCFunction)PyTensorObject_floor_divide, METH_VARARGS | METH_KEYWORDS, NULL}, + {"atan2", (PyCFunction)PyTensorObject_atan2, METH_VARARGS | METH_KEYWORDS, NULL}, + {"gt", (PyCFunction)PyTensorObject_gt, METH_VARARGS | METH_KEYWORDS, NULL}, + {"ge", (PyCFunction)PyTensorObject_ge, METH_VARARGS | METH_KEYWORDS, NULL}, + {"div", (PyCFunction)PyTensorObject_div, METH_VARARGS | METH_KEYWORDS, NULL}, + {"floor_divide", (PyCFunction)PyTensorObject_div, METH_VARARGS | METH_KEYWORDS, NULL}, + {"div_", (PyCFunction)PyTensorObject_div_, METH_VARARGS | METH_KEYWORDS, NULL}, + {"mul", (PyCFunction)PyTensorObject_mul, METH_VARARGS | METH_KEYWORDS, NULL}, + {"mul_", (PyCFunction)PyTensorObject_mul_, METH_VARARGS | METH_KEYWORDS, NULL}, + {"sub", (PyCFunction)PyTensorObject_sub, METH_VARARGS | METH_KEYWORDS, NULL}, + {"fmod", (PyCFunction)PyTensorObject_fmod, METH_VARARGS | METH_KEYWORDS, NULL}, + {"matmul", (PyCFunction)PyTensorObject_matmul, METH_VARARGS | METH_KEYWORDS, NULL}, + {"logical_and", (PyCFunction)PyTensorObject_logical_and, METH_VARARGS | METH_KEYWORDS, NULL}, + {"logical_or", (PyCFunction)PyTensorObject_logical_or, METH_VARARGS | METH_KEYWORDS, NULL}, + {"logical_xor", (PyCFunction)PyTensorObject_logical_xor, METH_VARARGS | METH_KEYWORDS, NULL}, + {"bmm", (PyCFunction)PyTensorObject_bmm, METH_VARARGS | METH_KEYWORDS, NULL}, + {"ne", (PyCFunction)PyTensorObject_ne, METH_VARARGS | METH_KEYWORDS, NULL}, + {"lt", (PyCFunction)PyTensorObject_lt, METH_VARARGS | METH_KEYWORDS, NULL}, + {"le", (PyCFunction)PyTensorObject_le, METH_VARARGS | METH_KEYWORDS, NULL}, + + // macro UNARY_METHOD {"abs", PyTensorObject_abs, METH_NOARGS, NULL}, {"exp", PyTensorObject_exp, METH_NOARGS, NULL}, {"floor", PyTensorObject_floor, METH_NOARGS, NULL}, @@ -302,9 +640,9 @@ PyMethodDef PyTensorObject_extra_methods[] = { {"sin_", PyTensorObject_sin_, METH_NOARGS, NULL}, {"isnan", PyTensorObject_isnan, METH_NOARGS, NULL}, {"isinf", PyTensorObject_isinf, METH_NOARGS, NULL}, - {"floor_divide", PyTensorObject_div, METH_O, NULL}, {"floor", PyTensorObject_floor, METH_NOARGS, NULL}, {"floor_", PyTensorObject_floor_, METH_NOARGS, NULL}, + {"logical_not", PyTensorObject_logical_not, METH_NOARGS, NULL}, {NULL}, }; diff --git a/python/oneflow/framework/tensor.py b/python/oneflow/framework/tensor.py index a03b3b2d192..0ee70259703 100755 --- a/python/oneflow/framework/tensor.py +++ b/python/oneflow/framework/tensor.py @@ -1166,7 +1166,7 @@ def RegisterMethods(): Tensor.__iadd__ = lambda self, other: self.add_(other) Tensor.ndim = property(_ndim) Tensor.numpy = _numpy - Tensor.size = _size + # Tensor.size = _size Tensor.backward = _backward Tensor.__setitem__ = _setitem Tensor.__str__ = _str @@ -1190,37 +1190,37 @@ def RegisterMethods(): Tensor.fill_ = _fill Tensor.copy_ = _copy Tensor._meta_repr = _meta_repr - Tensor.floor_divide = _floor_divide - Tensor.argmax = _argmax - Tensor.argmin = _argmin + # Tensor.floor_divide = _floor_divide + # Tensor.argmax = _argmax + # Tensor.argmin = _argmin Tensor.argsort = _argsort Tensor.argwhere = _argwhere - Tensor.amin = _amin - Tensor.atan2 = _atan2 - Tensor.gt = _gt - Tensor.ge = _ge - Tensor.cast = _cast - Tensor.diag 
= _diag - Tensor.diagonal = _diagonal + # Tensor.amin = _amin + # Tensor.atan2 = _atan2 + # Tensor.gt = _gt + # Tensor.ge = _ge + # Tensor.cast = _cast + # Tensor.diag = _diag + # Tensor.diagonal = _diagonal Tensor.add = _add Tensor.add_ = _add_inplace - Tensor.addcmul = _addcmul - Tensor.addcmul_ = _addcmul_ - Tensor.div = _truediv - Tensor.div_ = _truediv_inplace - Tensor.mul = _mul - Tensor.mul_ = _mul_ - Tensor.sub = _sub - Tensor.sub_ = _sub_inplace - Tensor.clamp = _clamp - Tensor.clamp_ = _clamp_ - Tensor.clip = _clip - Tensor.clip_ = _clip_ + # Tensor.addcmul = _addcmul + # Tensor.addcmul_ = _addcmul_ + # Tensor.div = _truediv + # Tensor.div_ = _truediv_inplace + # Tensor.mul = _mul + # Tensor.mul_ = _mul_ + # Tensor.sub = _sub + # Tensor.sub_ = _sub_inplace + # Tensor.clamp = _clamp + # Tensor.clamp_ = _clamp_ + # Tensor.clip = _clip + # Tensor.clip_ = _clip_ Tensor.cpu = _cpu Tensor.cuda = _cuda Tensor.expand = _expand Tensor.expand_as = _expand_as - Tensor.fmod = _fmod + # Tensor.fmod = _fmod Tensor.flatten = _flatten Tensor.flip = _flip Tensor.in_top_k = _in_top_k @@ -1233,7 +1233,7 @@ def RegisterMethods(): Tensor.pow = _pow Tensor.var = _var Tensor.std = _std - Tensor.matmul = _matmul + # Tensor.matmul = _matmul Tensor.softplus = _softplus Tensor.tril = _tril Tensor.triu = _triu @@ -1248,12 +1248,12 @@ def RegisterMethods(): Tensor.relu_ = _relu_inplace Tensor.softmax = _softmax Tensor.log_softmax = _log_softmax - Tensor.logical_and = _and - Tensor.logical_or = _or - Tensor.logical_not = _not - Tensor.logical_xor = _xor + # Tensor.logical_and = _and + # Tensor.logical_or = _or + # Tensor.logical_not = _not + # Tensor.logical_xor = _xor Tensor.roll = _roll - Tensor.bmm = _bmm + # Tensor.bmm = _bmm Tensor.chunk = _chunk Tensor.repeat = _repeat Tensor.tile = _tile @@ -1275,10 +1275,10 @@ def RegisterMethods(): Tensor.masked_fill = _masked_fill Tensor.masked_select = _masked_select Tensor.eq = _eq - Tensor.ne = _ne + # Tensor.ne = _ne Tensor.item = _item - Tensor.lt = _lt - Tensor.le = _le + # Tensor.lt = _lt + # Tensor.le = _le Tensor.to_local = _to_local Tensor.reshape = _reshape Tensor.reshape_as = _reshape_as From 4772aa090fa2a065e300e7be2584b44c107e8a31 Mon Sep 17 00:00:00 2001 From: WangYi Date: Thu, 26 May 2022 16:59:43 +0800 Subject: [PATCH 08/50] add cpu and cuda api --- .../api/python/framework/tensor_functions.cpp | 34 +++++++++++++++++++ python/oneflow/framework/tensor.py | 4 +-- 2 files changed, 36 insertions(+), 2 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index 340cdedefe0..bd03f50de22 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -545,6 +545,38 @@ static PyObject* PyTensorObject_clip_(PyObject* self, PyObject* args, PyObject* END_HANDLE_ERRORS } +static PyObject* PyTensorObject_cpu(PyObject* self, PyObject* unused) { + HANDLE_ERRORS + std::cout << "cpython" << std::endl; + PyObjectPtr dict(PyDict_New()); + PyObjectPtr device(PyUnicode_FromString("cpu")); + CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "device", device.get()) > -1); + PyObjectPtr tuple(PyTuple_Pack(1, self)); + return functional::to(NULL, tuple.get(), dict.get()); + END_HANDLE_ERRORS +} + +static PyObject* PyTensorObject_cuda(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_ERRORS + std::cout << "cpython" << std::endl; + PyObject* device = Py_None; + static const char* keywords[2] = {"device", NULL}; + 
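+  // device may be None (defaults to "cuda"), an integer index, or a device
+  // string; an integer is normalized to the "cuda:<index>" form below.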
if(!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:cuda", const_cast(keywords), &device)) + { + return NULL; + } + if(device == Py_None) + device = PyUnicode_FromString("cuda"); + else if(PyLong_Check(device)) { + device = PyUnicode_Concat(PyUnicode_FromString("cuda:"), PyUnicode_FromFormat("%d", PyLong_AsLong(device))); + } + PyObjectPtr dict(PyDict_New()); + CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "device", device) > -1); + PyObjectPtr tuple(PyTuple_Pack(1, self)); + return functional::to(NULL, tuple.get(), dict.get()); + END_HANDLE_ERRORS +} + PyMethodDef PyTensorObject_extra_methods[] = { {"byte", PyTensorObject_byte, METH_NOARGS, NULL}, {"size", (PyCFunction)PyTensorObject_size, METH_VARARGS | METH_KEYWORDS, NULL}, @@ -568,6 +600,8 @@ PyMethodDef PyTensorObject_extra_methods[] = { {"clamp_", (PyCFunction)PyTensorObject_clamp_, METH_VARARGS | METH_KEYWORDS, NULL}, {"clip", (PyCFunction)PyTensorObject_clip, METH_VARARGS | METH_KEYWORDS, NULL}, {"clip_", (PyCFunction)PyTensorObject_clip_, METH_VARARGS | METH_KEYWORDS, NULL}, + {"cpu", PyTensorObject_cpu, METH_NOARGS, NULL}, + {"cuda", (PyCFunction)PyTensorObject_cuda, METH_VARARGS | METH_KEYWORDS, NULL}, // macro BINARY_METHOD {"floor_divide", (PyCFunction)PyTensorObject_floor_divide, METH_VARARGS | METH_KEYWORDS, NULL}, diff --git a/python/oneflow/framework/tensor.py b/python/oneflow/framework/tensor.py index 0ee70259703..47af5a10d5a 100755 --- a/python/oneflow/framework/tensor.py +++ b/python/oneflow/framework/tensor.py @@ -1216,8 +1216,8 @@ def RegisterMethods(): # Tensor.clamp_ = _clamp_ # Tensor.clip = _clip # Tensor.clip_ = _clip_ - Tensor.cpu = _cpu - Tensor.cuda = _cuda + # Tensor.cpu = _cpu + # Tensor.cuda = _cuda Tensor.expand = _expand Tensor.expand_as = _expand_as # Tensor.fmod = _fmod From 7955c612d1288cd72515337236f2beafa62fa10b Mon Sep 17 00:00:00 2001 From: WangYi Date: Fri, 27 May 2022 10:10:11 +0800 Subject: [PATCH 09/50] add triu tril norm and etc. --- .../api/python/framework/tensor_functions.cpp | 183 +++++++++++++++++- python/oneflow/framework/tensor.py | 20 +- 2 files changed, 188 insertions(+), 15 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index bd03f50de22..6e0fe8b4d15 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -15,10 +15,17 @@ limitations under the License. 
*/ #include +#include +#include +#include +#include +#include #include "oneflow/api/python/exception/exception.h" #include "oneflow/api/python/framework/size.h" +#include "oneflow/api/python/framework/tensor.h" #include "oneflow/api/python/functional/common.h" #include "oneflow/api/python/functional/functional_api.yaml.pybind.h" +#include "oneflow/core/common/throw.h" #include "oneflow/core/functional/functional.h" namespace oneflow { @@ -561,14 +568,15 @@ static PyObject* PyTensorObject_cuda(PyObject* self, PyObject* args, PyObject* k std::cout << "cpython" << std::endl; PyObject* device = Py_None; static const char* keywords[2] = {"device", NULL}; - if(!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:cuda", const_cast(keywords), &device)) - { + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:cuda", const_cast(keywords), + &device)) { return NULL; } - if(device == Py_None) + if (device == Py_None) device = PyUnicode_FromString("cuda"); - else if(PyLong_Check(device)) { - device = PyUnicode_Concat(PyUnicode_FromString("cuda:"), PyUnicode_FromFormat("%d", PyLong_AsLong(device))); + else if (PyLong_Check(device)) { + device = PyUnicode_Concat(PyUnicode_FromString("cuda:"), + PyUnicode_FromFormat("%d", PyLong_AsLong(device))); } PyObjectPtr dict(PyDict_New()); CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "device", device) > -1); @@ -577,6 +585,161 @@ static PyObject* PyTensorObject_cuda(PyObject* self, PyObject* args, PyObject* k END_HANDLE_ERRORS } +static PyObject* PyTensorObject_in_top_k(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_ERRORS + PyObject* predictions = NULL; + PyObject* k = NULL; + static const char* keywords[3] = {"predictions", "k", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO:in_top_k", const_cast(keywords), + &predictions, &k)) { + return NULL; + } + PyObjectPtr tuple(PyTuple_Pack(3, self, predictions, k)); + return functional::in_top_k(NULL, tuple.get(), NULL); + END_HANDLE_ERRORS +} + +static PyObject* PyTensorObject_index_select(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_ERRORS + PyObject* dim = NULL; + PyObject* index = NULL; + static const char* keywords[3] = {"dim", "index", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO:index_select", const_cast(keywords), + &dim, &index)) { + return NULL; + } + PyObjectPtr tuple(PyTuple_Pack(3, self, dim, index)); + return functional::index_select(NULL, tuple.get(), NULL); + END_HANDLE_ERRORS +} + +static PyObject* PyTensorObject_minimum(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_ERRORS + PyObject* y = NULL; + static const char* keywords[2] = {"y", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O:minimum", const_cast(keywords), &y)) { + return NULL; + } + CHECK_OR_THROW(PyTensor_Check(y)) + << Error::TypeError() << "minimum(): argument 'other' must be tensor, not " + << functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(y))); + return PyTensor_New(ASSERT_PTR(functional::Minimum(PyTensor_Unpack(self), PyTensor_Unpack(y)))); + END_HANDLE_ERRORS +} + +static PyObject* PyTensorObject_maximum(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_ERRORS + PyObject* y = NULL; + static const char* keywords[2] = {"y", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O:maximum", const_cast(keywords), &y)) { + return NULL; + } + CHECK_OR_THROW(PyTensor_Check(y)) + << Error::TypeError() << "minimum(): argument 'other' must be tensor, not " + << functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(y))); + return 
PyTensor_New(ASSERT_PTR(functional::Maximum(PyTensor_Unpack(self), PyTensor_Unpack(y))));
+  END_HANDLE_ERRORS
+}
+
+static PyObject* PyTensorObject_pow(PyObject* self, PyObject* args, PyObject* kwargs) {
+  HANDLE_ERRORS
+  PyObject* b = NULL;
+  static const char* keywords[2] = {"b", NULL};
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O:pow", const_cast<char**>(keywords), &b)) {
+    return NULL;
+  }
+  PyObjectPtr tuple(PyTuple_Pack(2, self, b));
+  return functional::pow(NULL, tuple.get(), NULL);
+  END_HANDLE_ERRORS
+}
+
+static PyObject* PyTensorObject_var(PyObject* self, PyObject* args, PyObject* kwargs) {
+  HANDLE_ERRORS
+  PyObject* dim = Py_None;
+  PyObject* unbiased = Py_True;
+  PyObject* keepdim = Py_False;
+  static const char* keywords[4] = {"dim", "unbiased", "keepdim", NULL};
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOO:var", const_cast<char**>(keywords), &dim,
+                                   &unbiased, &keepdim)) {
+    return NULL;
+  }
+  PyObjectPtr tuple(PyTuple_Pack(1, self));
+  PyObjectPtr dict(PyDict_New());
+  PyDict_SetItemString(dict.get(), "dim", dim);
+  PyDict_SetItemString(dict.get(), "unbiased", unbiased);
+  PyDict_SetItemString(dict.get(), "keepdim", keepdim);
+  return functional::var(NULL, tuple.get(), dict.get());
+  END_HANDLE_ERRORS
+}
+
+static PyObject* PyTensorObject_std(PyObject* self, PyObject* args, PyObject* kwargs) {
+  HANDLE_ERRORS
+  PyObject* dim = Py_None;
+  PyObject* unbiased = Py_True;
+  PyObject* keepdim = Py_False;
+  static const char* keywords[4] = {"dim", "unbiased", "keepdim", NULL};
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOO:std", const_cast<char**>(keywords), &dim,
+                                   &unbiased, &keepdim)) {
+    return NULL;
+  }
+  PyObjectPtr tuple(PyTuple_Pack(1, self));
+  PyObjectPtr dict(PyDict_New());
+  PyDict_SetItemString(dict.get(), "dim", dim);
+  PyDict_SetItemString(dict.get(), "unbiased", unbiased);
+  PyDict_SetItemString(dict.get(), "keepdim", keepdim);
+  return functional::std(NULL, tuple.get(), dict.get());
+  END_HANDLE_ERRORS
+}
+
+static PyObject* PyTensorObject_tril(PyObject* self, PyObject* args, PyObject* kwargs) {
+  HANDLE_ERRORS
+  std::cout << "cpython" << std::endl;
+  PyObject* diagonal = PyLong_FromLong(0);
+  static const char* keywords[2] = {"diagonal", NULL};
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:tril", const_cast<char**>(keywords), &diagonal)) {
+    return NULL;
+  }
+  CHECK_OR_THROW(PyLong_Check(diagonal))
+      << Error::TypeError() << "tril(): argument 'diagonal' must be int64, not "
+      << functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(diagonal)));
+  return PyTensor_New(ASSERT_PTR(functional::Tril(PyTensor_Unpack(self), PyLong_AsLong(diagonal))));
+  END_HANDLE_ERRORS
+}
+
+static PyObject* PyTensorObject_triu(PyObject* self, PyObject* args, PyObject* kwargs) {
+  HANDLE_ERRORS
+  std::cout << "cpython" << std::endl;
+  PyObject* diagonal = PyLong_FromLong(0);
+  static const char* keywords[2] = {"diagonal", NULL};
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:triu", const_cast<char**>(keywords), &diagonal)) {
+    return NULL;
+  }
+  CHECK_OR_THROW(PyLong_Check(diagonal))
+      << Error::TypeError() << "triu(): argument 'diagonal' must be int64, not "
+      << functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(diagonal)));
+  return PyTensor_New(ASSERT_PTR(functional::Triu(PyTensor_Unpack(self), PyLong_AsLong(diagonal))));
+  END_HANDLE_ERRORS
+}
+
+static PyObject* PyTensorObject_norm(PyObject* self, PyObject* args, PyObject* kwargs) {
+  HANDLE_ERRORS
+  PyObject* p = Py_None;
+  PyObject* dim = Py_None;
+  PyObject* keepdim = Py_False;
+  PyObject* dtype = Py_None;
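+  // p, dim and keepdim are forwarded positionally to functional::norm;
+  // dtype travels through the kwargs dict.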
static const char* keywords[5] = {"p", "dim", "keepdim", "dtype" ,NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOOO:norm", const_cast(keywords), &p, &dim,&keepdim, &dtype)) { + return NULL; + } + PyObjectPtr tuple(PyTuple_Pack(4, self, p, dim, keepdim)); + PyObjectPtr dict(PyDict_New()); + CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "dtype", dtype) > -1); + return functional::norm(NULL, tuple.get(), dict.get()); + END_HANDLE_ERRORS +} + + + PyMethodDef PyTensorObject_extra_methods[] = { {"byte", PyTensorObject_byte, METH_NOARGS, NULL}, {"size", (PyCFunction)PyTensorObject_size, METH_VARARGS | METH_KEYWORDS, NULL}, @@ -602,6 +765,16 @@ PyMethodDef PyTensorObject_extra_methods[] = { {"clip_", (PyCFunction)PyTensorObject_clip_, METH_VARARGS | METH_KEYWORDS, NULL}, {"cpu", PyTensorObject_cpu, METH_NOARGS, NULL}, {"cuda", (PyCFunction)PyTensorObject_cuda, METH_VARARGS | METH_KEYWORDS, NULL}, + {"in_top_k", (PyCFunction)PyTensorObject_in_top_k, METH_VARARGS | METH_KEYWORDS, NULL}, + {"index_select", (PyCFunction)PyTensorObject_index_select, METH_VARARGS | METH_KEYWORDS, NULL}, + {"minimum", (PyCFunction)PyTensorObject_minimum, METH_VARARGS | METH_KEYWORDS, NULL}, + {"maximum", (PyCFunction)PyTensorObject_maximum, METH_VARARGS | METH_KEYWORDS, NULL}, + {"pow", (PyCFunction)PyTensorObject_pow, METH_VARARGS | METH_KEYWORDS, NULL}, + {"var", (PyCFunction)PyTensorObject_var, METH_VARARGS | METH_KEYWORDS, NULL}, + {"std", (PyCFunction)PyTensorObject_std, METH_VARARGS | METH_KEYWORDS, NULL}, + {"tril", (PyCFunction)PyTensorObject_tril, METH_VARARGS | METH_KEYWORDS, NULL}, + {"triu", (PyCFunction)PyTensorObject_triu, METH_VARARGS | METH_KEYWORDS, NULL}, + {"norm", (PyCFunction)PyTensorObject_norm, METH_VARARGS | METH_KEYWORDS, NULL}, // macro BINARY_METHOD {"floor_divide", (PyCFunction)PyTensorObject_floor_divide, METH_VARARGS | METH_KEYWORDS, NULL}, diff --git a/python/oneflow/framework/tensor.py b/python/oneflow/framework/tensor.py index 47af5a10d5a..0f608798b1f 100755 --- a/python/oneflow/framework/tensor.py +++ b/python/oneflow/framework/tensor.py @@ -1223,22 +1223,22 @@ def RegisterMethods(): # Tensor.fmod = _fmod Tensor.flatten = _flatten Tensor.flip = _flip - Tensor.in_top_k = _in_top_k - Tensor.index_select = _index_select - Tensor.minimum = _minimum - Tensor.maximum = _maximum + # Tensor.in_top_k = _in_top_k + # Tensor.index_select = _index_select + # Tensor.minimum = _minimum + # Tensor.maximum = _maximum Tensor.new_empty = _new_empty Tensor.new_ones = _new_ones Tensor.new_zeros = _new_zeros - Tensor.pow = _pow - Tensor.var = _var - Tensor.std = _std + # Tensor.pow = _pow + # Tensor.var = _var + # Tensor.std = _std # Tensor.matmul = _matmul Tensor.softplus = _softplus - Tensor.tril = _tril - Tensor.triu = _triu + # Tensor.tril = _tril + # Tensor.triu = _triu Tensor.where = _where - Tensor.norm = _norm + # Tensor.norm = _norm Tensor.transpose = _transpose Tensor.permute = _permute Tensor.local_to_global = _local_to_global From 76fa59750ebc49293612f91284d36f27af688f29 Mon Sep 17 00:00:00 2001 From: WangYi Date: Fri, 27 May 2022 10:29:58 +0800 Subject: [PATCH 10/50] remove tensor_functions.h --- oneflow/api/python/framework/tensor.cpp | 1 - .../api/python/framework/tensor_functions.h | 31 ------------------- 2 files changed, 32 deletions(-) delete mode 100644 oneflow/api/python/framework/tensor_functions.h diff --git a/oneflow/api/python/framework/tensor.cpp b/oneflow/api/python/framework/tensor.cpp index eec1dd9e6ec..39e3360b47e 100644 --- 
a/oneflow/api/python/framework/tensor.cpp +++ b/oneflow/api/python/framework/tensor.cpp @@ -36,7 +36,6 @@ limitations under the License. #include "oneflow/core/framework/placement_utils.h" #include "oneflow/core/functional/functional.h" #include "oneflow/core/functional/tensor_index.h" -#include "oneflow/api/python/framework/tensor_functions.h" namespace py = pybind11; diff --git a/oneflow/api/python/framework/tensor_functions.h b/oneflow/api/python/framework/tensor_functions.h deleted file mode 100644 index 6b1359ab94d..00000000000 --- a/oneflow/api/python/framework/tensor_functions.h +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2020 The OneFlow Authors. All rights reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ -#ifndef ONEFLOW_API_PYTHON_FRAMEWORK_TENSOR_FUNCTIONS_H -#define ONEFLOW_API_PYTHON_FRAMEWORK_TENSOR_FUNCTIONS_H - -#include - -namespace oneflow { -namespace one { - -extern PyNumberMethods PyTensorObject_as_number; -extern PyMethodDef PyTensorObject_extra_methods[]; -extern PyObject* PyTensorObject_richcompare(PyObject*, PyObject*, int); - -} // namespace one -} // namespace oneflow - -#endif // ONEFLOW_API_PYTHON_FRAMEWORK_TENSOR_FUNCTIONS_H_ From e6e22e85491c522c83721edd9074de53a5d39c45 Mon Sep 17 00:00:00 2001 From: WangYi Date: Fri, 27 May 2022 11:55:01 +0800 Subject: [PATCH 11/50] move more api --- .../api/python/framework/tensor_functions.cpp | 105 +++++++++++++++++- python/oneflow/framework/tensor.py | 20 ++-- 2 files changed, 111 insertions(+), 14 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index 7028243529d..04cdfc3d70d 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -729,8 +729,6 @@ static PyObject* PyTensorObject_norm(PyObject* self, PyObject* args, PyObject* k END_HANDLE_ERRORS } - - static PyObject* PyTensorObject_reshape(PyObject* self, PyObject* args) { HANDLE_ERRORS PyObject* shape = args; @@ -760,6 +758,95 @@ static PyObject* PyTensorObject_reshape_as(PyObject* self, PyObject* args, PyObj END_HANDLE_ERRORS } +static PyObject* PyTensorObject_relu(PyObject* self, PyObject* unused) { + HANDLE_ERRORS + auto tensor = PyTensor_Unpack(self); + return PyTensor_New(ASSERT_PTR(functional::Relu(tensor, false))); + END_HANDLE_ERRORS +} + +static PyObject* PyTensorObject_relu_(PyObject* self, PyObject* unused) { + HANDLE_ERRORS + auto tensor = PyTensor_Unpack(self); + return PyTensor_New(ASSERT_PTR(functional::Relu(tensor, true))); + END_HANDLE_ERRORS +} + +static PyObject* PyTensorObject_softmax(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_ERRORS + auto tensor = PyTensor_Unpack(self); + PyObject* dim = Py_None; + static const char* keywords[2] = {"dim", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:softmax", const_cast(keywords), + &dim)) { + return NULL; + } + if(dim == Py_None) + return PyTensor_New(ASSERT_PTR(functional::Softmax(tensor, NullOpt))); + CHECK_OR_THROW(PyLong_Check(dim)) << 
Error::TypeError() << "softmax(): argument 'dim' must be int64, not " + << functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(dim))); + return PyTensor_New(ASSERT_PTR(functional::Softmax(tensor, PyLong_AsLong(dim)))); + END_HANDLE_ERRORS +} + +static PyObject* PyTensorObject_log_softmax(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_ERRORS + auto tensor = PyTensor_Unpack(self); + PyObject* dim = Py_None; + static const char* keywords[2] = {"dim", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:log_softmax", const_cast(keywords), + &dim)) { + return NULL; + } + if(dim == Py_None) + return PyTensor_New(ASSERT_PTR(functional::LogSoftmax(tensor, NullOpt))); + CHECK_OR_THROW(PyLong_Check(dim)) << Error::TypeError() << "log_softmax(): argument 'dim' must be int64, not " + << functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(dim))); + return PyTensor_New(ASSERT_PTR(functional::LogSoftmax(tensor, PyLong_AsLong(dim)))); + END_HANDLE_ERRORS +} + +static PyObject* PyTensorObject_roll(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_ERRORS + PyObject* shifts = NULL; + PyObject* dims = Py_None; + static const char* keywords[3] = {"shifts", "dims", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|O:roll", const_cast(keywords), + &shifts, &dims)) { + return NULL; + } + PyObjectPtr tuple(PyTuple_Pack(3, self, shifts, dims)); + return functional::roll(NULL, tuple.get(), NULL); + END_HANDLE_ERRORS +} + +static PyObject* PyTensorObject_chunk(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_ERRORS + PyObject* chunks = Py_None; + PyObject* dim = Py_None; + static const char* keywords[3] = {"chunks", "dim", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO:chunk", const_cast(keywords), + &chunks, &dim)) { + return NULL; + } + PyObjectPtr tuple(PyTuple_Pack(3, self, chunks, dim)); + return functional::chunk(NULL, tuple.get(), NULL); + END_HANDLE_ERRORS +} + +#define DATATYPE_FUNC(func_name, dtype) \ + static PyObject* func_name(PyObject* self, PyObject* unused) { \ + HANDLE_ERRORS \ + auto tensor = PyTensor_Unpack(self); \ + return PyTensor_New(ASSERT_PTR(functional::To(tensor, dtype, false))); \ + END_HANDLE_ERRORS \ + } + +DATATYPE_FUNC(PyTensorObject_int, DType::Int32()); +DATATYPE_FUNC(PyTensorObject_long, DType::Int64()); +DATATYPE_FUNC(PyTensorObject_float, DType::Float()); +DATATYPE_FUNC(PyTensorObject_double, DType::Double()); + PyMethodDef PyTensorObject_extra_methods[] = { {"byte", PyTensorObject_byte, METH_NOARGS, NULL}, {"size", (PyCFunction)PyTensorObject_size, METH_VARARGS | METH_KEYWORDS, NULL}, @@ -797,6 +884,16 @@ PyMethodDef PyTensorObject_extra_methods[] = { {"norm", (PyCFunction)PyTensorObject_norm, METH_VARARGS | METH_KEYWORDS, NULL}, {"reshape", PyTensorObject_reshape, METH_VARARGS, NULL}, {"reshape_as", (PyCFunction)PyTensorObject_reshape_as, METH_VARARGS | METH_KEYWORDS, NULL}, + {"relu", PyTensorObject_relu, METH_NOARGS, NULL}, + {"relu_", PyTensorObject_relu_, METH_NOARGS, NULL}, + {"softmax", (PyCFunction)PyTensorObject_softmax, METH_VARARGS | METH_KEYWORDS, NULL}, + {"log_softmax", (PyCFunction)PyTensorObject_log_softmax, METH_VARARGS | METH_KEYWORDS, NULL}, + {"roll", (PyCFunction)PyTensorObject_roll, METH_VARARGS | METH_KEYWORDS, NULL}, + {"chunk", (PyCFunction)PyTensorObject_chunk, METH_VARARGS | METH_KEYWORDS, NULL}, + {"int", PyTensorObject_int, METH_NOARGS, NULL}, + {"long", PyTensorObject_long, METH_NOARGS, NULL}, + {"float", PyTensorObject_float, METH_NOARGS, NULL}, + {"double", 
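+     /* The DATATYPE_FUNC wrappers take no arguments, hence METH_NOARGS and
+        no cast; the keyword-taking wrappers above need the (PyCFunction)
+        cast plus METH_VARARGS | METH_KEYWORDS so CPython hands them both
+        args and kwargs. */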
PyTensorObject_double, METH_NOARGS, NULL}, // macro BINARY_METHOD {"floor_divide", (PyCFunction)PyTensorObject_floor_divide, METH_VARARGS | METH_KEYWORDS, NULL}, @@ -804,7 +901,7 @@ PyMethodDef PyTensorObject_extra_methods[] = { {"gt", (PyCFunction)PyTensorObject_gt, METH_VARARGS | METH_KEYWORDS, NULL}, {"ge", (PyCFunction)PyTensorObject_ge, METH_VARARGS | METH_KEYWORDS, NULL}, {"div", (PyCFunction)PyTensorObject_div, METH_VARARGS | METH_KEYWORDS, NULL}, - {"floor_divide", (PyCFunction)PyTensorObject_div, METH_VARARGS | METH_KEYWORDS, NULL}, + // {"floor_divide", (PyCFunction)PyTensorObject_div, METH_VARARGS | METH_KEYWORDS, NULL}, {"div_", (PyCFunction)PyTensorObject_div_, METH_VARARGS | METH_KEYWORDS, NULL}, {"mul", (PyCFunction)PyTensorObject_mul, METH_VARARGS | METH_KEYWORDS, NULL}, {"mul_", (PyCFunction)PyTensorObject_mul_, METH_VARARGS | METH_KEYWORDS, NULL}, @@ -870,7 +967,7 @@ PyMethodDef PyTensorObject_extra_methods[] = { {"isnan", PyTensorObject_isnan, METH_NOARGS, NULL}, {"isinf", PyTensorObject_isinf, METH_NOARGS, NULL}, {"logical_not", PyTensorObject_logical_not, METH_NOARGS, NULL}, - {"floor_divide", PyTensorObject_div, METH_O, NULL}, + // {"floor_divide", PyTensorObject_div, METH_O, NULL}, {"floor", PyTensorObject_floor, METH_NOARGS, NULL}, {"floor_", PyTensorObject_floor_, METH_NOARGS, NULL}, {NULL}, diff --git a/python/oneflow/framework/tensor.py b/python/oneflow/framework/tensor.py index 0a7ca52f689..ff17f9c84eb 100755 --- a/python/oneflow/framework/tensor.py +++ b/python/oneflow/framework/tensor.py @@ -1252,17 +1252,17 @@ def RegisterMethods(): Tensor.local_to_global = _local_to_global Tensor.global_to_global = _global_to_global Tensor.to_global = _to_global - Tensor.relu = _relu - Tensor.relu_ = _relu_inplace - Tensor.softmax = _softmax - Tensor.log_softmax = _log_softmax + # Tensor.relu = _relu + # Tensor.relu_ = _relu_inplace + # Tensor.softmax = _softmax + # Tensor.log_softmax = _log_softmax # Tensor.logical_and = _and # Tensor.logical_or = _or # Tensor.logical_not = _not # Tensor.logical_xor = _xor - Tensor.roll = _roll + # Tensor.roll = _roll # Tensor.bmm = _bmm - Tensor.chunk = _chunk + # Tensor.chunk = _chunk Tensor.repeat = _repeat Tensor.tile = _tile Tensor.split = _split @@ -1293,10 +1293,10 @@ def RegisterMethods(): Tensor.sort = _sort Tensor.type_as = _type_as Tensor.tolist = _tolist - Tensor.int = _int - Tensor.long = _long - Tensor.float = _float - Tensor.double = _double + # Tensor.int = _int + # Tensor.long = _long + # Tensor.float = _float + # Tensor.double = _double Tensor.is_floating_point = _is_floating_point Tensor.topk = _topk Tensor.nms = _nms From cac0e24414ad3f6b6059acf5e633683f79377133 Mon Sep 17 00:00:00 2001 From: WangYi Date: Fri, 27 May 2022 13:36:49 +0800 Subject: [PATCH 12/50] move more api, refine size --- oneflow/api/python/framework/tensor_functions.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index 04cdfc3d70d..04ac9e4e014 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -55,6 +55,8 @@ using functional::PyObjectPtr; NB_UNARY_FUNC(PyTensorObject_nb_absolute, functional::abs); NB_UNARY_FUNC(PyTensorObject_nb_negative, functional::negative); +// TODO: not implemented yet +// NB_UNARY_FUNC(PyTensorObject_positive, functional::positive); NB_BINARY_FUNC(PyTensorObject_nb_add, functional::add); NB_BINARY_FUNC(PyTensorObject_nb_sub, 
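   /* These NB_* expansions fill the PyNumberMethods (tp_as_number) slots
      declared further down, so operator expressions such as a - b dispatch
      straight into the C implementation with no Python-level method lookup. */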
functional::sub); @@ -67,8 +69,6 @@ NB_BINARY_FUNC(PyTensorObject_nb_or, functional::logical_or); NB_BINARY_FUNC(PyTensorObject_nb_floor_div, functional::floor_divide); NB_BINARY_FUNC(PyTensorObject_nb_true_div, functional::div); NB_BINARY_FUNC(PyTensorObject_nb_matrix_multiply, functional::matmul); -// TODO: not implemented yet -// NB_UNARY_FUNC(PyTensorObject_positive, functional::positive); PyObject* PyTensorObject_nb_pow(PyObject* a, PyObject* b, PyObject* unsed) { HANDLE_ERRORS PyObjectPtr tuple(PyTuple_Pack(2, a, b)); @@ -287,14 +287,14 @@ static PyObject* PyTensorObject_get_device(PyObject* self, PyObject* unused) { static PyObject* PyTensorObject_size(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS std::cout << "cpython size" << std::endl; - PyObject* idx = NULL; + PyObject* idx = Py_None; static const char* keywords[2] = {"idx", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:size", const_cast(keywords), &idx)) { return NULL; } Shape shape = *PyTensor_Unpack(self)->shape(); PyObject* shape_object = TensorSize_NewFromShape(shape); - if (idx == NULL || idx == Py_None) return shape_object; + if (idx == Py_None) return shape_object; return shape_object->ob_type->tp_as_mapping->mp_subscript(shape_object, idx); END_HANDLE_ERRORS } From c16bc8dd1674d45055c35f7bd408c60b310901be Mon Sep 17 00:00:00 2001 From: WangYi Date: Fri, 27 May 2022 13:47:03 +0800 Subject: [PATCH 13/50] fix typo --- oneflow/api/python/framework/tensor_functions.cpp | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index 04ac9e4e014..501689b6bf2 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -69,6 +69,7 @@ NB_BINARY_FUNC(PyTensorObject_nb_or, functional::logical_or); NB_BINARY_FUNC(PyTensorObject_nb_floor_div, functional::floor_divide); NB_BINARY_FUNC(PyTensorObject_nb_true_div, functional::div); NB_BINARY_FUNC(PyTensorObject_nb_matrix_multiply, functional::matmul); + PyObject* PyTensorObject_nb_pow(PyObject* a, PyObject* b, PyObject* unsed) { HANDLE_ERRORS PyObjectPtr tuple(PyTuple_Pack(2, a, b)); @@ -294,7 +295,7 @@ static PyObject* PyTensorObject_size(PyObject* self, PyObject* args, PyObject* k } Shape shape = *PyTensor_Unpack(self)->shape(); PyObject* shape_object = TensorSize_NewFromShape(shape); - if (idx == Py_None) return shape_object; + if (idx == NULL || idx == Py_None) return shape_object; return shape_object->ob_type->tp_as_mapping->mp_subscript(shape_object, idx); END_HANDLE_ERRORS } @@ -387,7 +388,7 @@ static PyObject* PyTensorObject_diagonal(PyObject* self, PyObject* args, PyObjec PyObject* dim1 = PyLong_FromLong(0); PyObject* dim2 = PyLong_FromLong(1); static const char* keywords[4] = {"offset", "dim1", "dim2", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOO:diag", const_cast(keywords), &offset, + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOO:diagonal", const_cast(keywords), &offset, &dim1, &dim2)) { return NULL; } From ba679d3f4bc51bc35a886edc7750b0348c6c48c8 Mon Sep 17 00:00:00 2001 From: WangYi Date: Fri, 27 May 2022 14:48:32 +0800 Subject: [PATCH 14/50] format code, remove useless include --- .../api/python/framework/tensor_functions.cpp | 85 ++++++++++--------- 1 file changed, 43 insertions(+), 42 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index 501689b6bf2..21efe8f38f0 
100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -17,10 +17,8 @@ limitations under the License. #include #include "oneflow/api/python/exception/exception.h" #include "oneflow/api/python/framework/size.h" -#include "oneflow/api/python/framework/tensor.h" #include "oneflow/api/python/functional/common.h" #include "oneflow/api/python/functional/functional_api.yaml.pybind.h" -#include "oneflow/core/common/throw.h" #include "oneflow/core/common/shape_vec.h" #include "oneflow/core/functional/functional.h" #include "oneflow/core/common/shape.h" @@ -125,41 +123,41 @@ PyNumberMethods PyTensorObject_as_number = { PyTensorObject_nb_sub, // nb_subtract PyTensorObject_nb_mul, // nb_multiply PyTensorObject_nb_fmod, // nb_remainder - NULL, // nb_divmod + NULL, // nb_divmod PyTensorObject_nb_pow, // nb_power PyTensorObject_nb_negative, // nb_negative - NULL, // nb_positive + NULL, // nb_positive PyTensorObject_nb_absolute, // nb_absolute - NULL, // nb_bool + NULL, // nb_bool PyTensorObject_nb_invert, // nb_invert - NULL, // nb_lshift - NULL, // nb_rshift + NULL, // nb_lshift + NULL, // nb_rshift PyTensorObject_nb_and, // nb_and PyTensorObject_nb_xor, // nb_xor PyTensorObject_nb_or, // nb_or - NULL, // nb_int - NULL, // nb_reserved - NULL, // nb_float + NULL, // nb_int + NULL, // nb_reserved + NULL, // nb_float PyTensorObject_nb_inplace_add, // nb_inplace_add PyTensorObject_nb_inplace_sub, // nb_inplace_sub PyTensorObject_nb_inplace_mul, // nb_inplace_mul - NULL, // nb_inplace_remainder - NULL, // nb_inplace_pow - NULL, // nb_inplace_lshift - NULL, // nb_inplace_rshift - NULL, // nb_inplace_and - NULL, // nb_inplace_xor - NULL, // nb_inplace_or + NULL, // nb_inplace_remainder + NULL, // nb_inplace_pow + NULL, // nb_inplace_lshift + NULL, // nb_inplace_rshift + NULL, // nb_inplace_and + NULL, // nb_inplace_xor + NULL, // nb_inplace_or PyTensorObject_nb_floor_div, // nb_floor_div PyTensorObject_nb_true_div, // nb_true_div - NULL, // nb_inplace_floor_div + NULL, // nb_inplace_floor_div PyTensorObject_nb_inplace_true_div, // nb_inplace_true_div - NULL, // nb_index + NULL, // nb_index PyTensorObject_nb_matrix_multiply, // nb_matrix_multiply - NULL, // not implemented yet nb_inplace_matrix_multiply + NULL, // not implemented yet nb_inplace_matrix_multiply }; @@ -388,8 +386,8 @@ static PyObject* PyTensorObject_diagonal(PyObject* self, PyObject* args, PyObjec PyObject* dim1 = PyLong_FromLong(0); PyObject* dim2 = PyLong_FromLong(1); static const char* keywords[4] = {"offset", "dim1", "dim2", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOO:diagonal", const_cast(keywords), &offset, - &dim1, &dim2)) { + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOO:diagonal", const_cast(keywords), + &offset, &dim1, &dim2)) { return NULL; } PyObjectPtr tuple(PyTuple_Pack(4, self, offset, dim1, dim2)); @@ -688,7 +686,8 @@ static PyObject* PyTensorObject_tril(PyObject* self, PyObject* args, PyObject* k std::cout << "cpython" << std::endl; PyObject* diagonal = PyLong_FromLong(0); static const char* keywords[4] = {"diagonal", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:tril", const_cast(keywords), &diagonal)) { + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:tril", const_cast(keywords), + &diagonal)) { return NULL; } CHECK_OR_THROW(PyLong_Check(diagonal)) @@ -703,7 +702,8 @@ static PyObject* PyTensorObject_triu(PyObject* self, PyObject* args, PyObject* k std::cout << "cpython" << std::endl; PyObject* diagonal = 
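    /* tril/triu still build the default diagonal as a Python int object;
       later in this series (patch 15) diag/diagonal switch to a plain
       int32_t parsed with the "i" format, avoiding the temporary PyLong. */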
PyLong_FromLong(0); static const char* keywords[4] = {"diagonal", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:triu", const_cast(keywords), &diagonal)) { + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:triu", const_cast(keywords), + &diagonal)) { return NULL; } CHECK_OR_THROW(PyLong_Check(diagonal)) @@ -717,10 +717,11 @@ static PyObject* PyTensorObject_norm(PyObject* self, PyObject* args, PyObject* k HANDLE_ERRORS PyObject* p = Py_None; PyObject* dim = Py_None; - PyObject* keepdim= Py_False; + PyObject* keepdim = Py_False; PyObject* dtype = Py_None; - static const char* keywords[5] = {"p", "dim", "keepdim", "dtype" ,NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOOO:norm", const_cast(keywords), &p, &dim,&keepdim, &dtype)) { + static const char* keywords[5] = {"p", "dim", "keepdim", "dtype", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOOO:norm", const_cast(keywords), &p, + &dim, &keepdim, &dtype)) { return NULL; } PyObjectPtr tuple(PyTuple_Pack(4, self, p, dim, keepdim)); @@ -782,9 +783,9 @@ static PyObject* PyTensorObject_softmax(PyObject* self, PyObject* args, PyObject &dim)) { return NULL; } - if(dim == Py_None) - return PyTensor_New(ASSERT_PTR(functional::Softmax(tensor, NullOpt))); - CHECK_OR_THROW(PyLong_Check(dim)) << Error::TypeError() << "softmax(): argument 'dim' must be int64, not " + if (dim == Py_None) return PyTensor_New(ASSERT_PTR(functional::Softmax(tensor, NullOpt))); + CHECK_OR_THROW(PyLong_Check(dim)) + << Error::TypeError() << "softmax(): argument 'dim' must be int64, not " << functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(dim))); return PyTensor_New(ASSERT_PTR(functional::Softmax(tensor, PyLong_AsLong(dim)))); END_HANDLE_ERRORS @@ -799,9 +800,9 @@ static PyObject* PyTensorObject_log_softmax(PyObject* self, PyObject* args, PyOb &dim)) { return NULL; } - if(dim == Py_None) - return PyTensor_New(ASSERT_PTR(functional::LogSoftmax(tensor, NullOpt))); - CHECK_OR_THROW(PyLong_Check(dim)) << Error::TypeError() << "log_softmax(): argument 'dim' must be int64, not " + if (dim == Py_None) return PyTensor_New(ASSERT_PTR(functional::LogSoftmax(tensor, NullOpt))); + CHECK_OR_THROW(PyLong_Check(dim)) + << Error::TypeError() << "log_softmax(): argument 'dim' must be int64, not " << functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(dim))); return PyTensor_New(ASSERT_PTR(functional::LogSoftmax(tensor, PyLong_AsLong(dim)))); END_HANDLE_ERRORS @@ -812,8 +813,8 @@ static PyObject* PyTensorObject_roll(PyObject* self, PyObject* args, PyObject* k PyObject* shifts = NULL; PyObject* dims = Py_None; static const char* keywords[3] = {"shifts", "dims", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|O:roll", const_cast(keywords), - &shifts, &dims)) { + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|O:roll", const_cast(keywords), &shifts, + &dims)) { return NULL; } PyObjectPtr tuple(PyTuple_Pack(3, self, shifts, dims)); @@ -826,8 +827,8 @@ static PyObject* PyTensorObject_chunk(PyObject* self, PyObject* args, PyObject* PyObject* chunks = Py_None; PyObject* dim = Py_None; static const char* keywords[3] = {"chunks", "dim", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO:chunk", const_cast(keywords), - &chunks, &dim)) { + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO:chunk", const_cast(keywords), &chunks, + &dim)) { return NULL; } PyObjectPtr tuple(PyTuple_Pack(3, self, chunks, dim)); @@ -835,12 +836,12 @@ static PyObject* PyTensorObject_chunk(PyObject* self, PyObject* args, PyObject* 
END_HANDLE_ERRORS } -#define DATATYPE_FUNC(func_name, dtype) \ - static PyObject* func_name(PyObject* self, PyObject* unused) { \ - HANDLE_ERRORS \ - auto tensor = PyTensor_Unpack(self); \ +#define DATATYPE_FUNC(func_name, dtype) \ + static PyObject* func_name(PyObject* self, PyObject* unused) { \ + HANDLE_ERRORS \ + auto tensor = PyTensor_Unpack(self); \ return PyTensor_New(ASSERT_PTR(functional::To(tensor, dtype, false))); \ - END_HANDLE_ERRORS \ + END_HANDLE_ERRORS \ } DATATYPE_FUNC(PyTensorObject_int, DType::Int32()); From bb6100515efab83acdfa712f32a941a330e1a9ab Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Mon, 30 May 2022 14:34:48 +0800 Subject: [PATCH 15/50] refine code --- .../api/python/framework/tensor_functions.cpp | 162 ++++++++++-------- 1 file changed, 86 insertions(+), 76 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index 21efe8f38f0..252212d7362 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -15,10 +15,13 @@ limitations under the License. */ #include +#include +#include #include "oneflow/api/python/exception/exception.h" #include "oneflow/api/python/framework/size.h" #include "oneflow/api/python/functional/common.h" #include "oneflow/api/python/functional/functional_api.yaml.pybind.h" +#include "oneflow/core/common/device_type.pb.h" #include "oneflow/core/common/shape_vec.h" #include "oneflow/core/functional/functional.h" #include "oneflow/core/common/shape.h" @@ -94,7 +97,7 @@ static PyObject* PyTensorObject_nb_invert(PyObject* self) { PyObjectPtr tuple(PyTuple_Pack(2, a, b)); \ PyObjectPtr dict(PyDict_New()); \ CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "inplace", Py_True) > -1); \ - const auto& result = bind_func(NULL, tuple.get(), dict.get()); \ + auto* result = bind_func(NULL, tuple.get(), dict.get()); \ if (PyErr_Occurred()) { throw py::error_already_set(); } \ return result; \ END_HANDLE_ERRORS \ @@ -225,7 +228,9 @@ UNARY_METHOD(PyTensorObject_logical_not, functional::LogicalNot); return NULL; \ } \ PyObjectPtr tuple(PyTuple_Pack(2, self, other)); \ - return bind_func(NULL, tuple.get(), NULL); \ + auto* result = bind_func(NULL, tuple.get(), NULL); \ + if (PyErr_Occurred()) { throw py::error_already_set(); } \ + return result; \ END_HANDLE_ERRORS \ } @@ -276,7 +281,7 @@ static PyObject* PyTensorObject_element_size(PyObject* self, PyObject* unused) { static PyObject* PyTensorObject_get_device(PyObject* self, PyObject* unused) { HANDLE_ERRORS - auto device_type = ASSERT(PyTensor_Unpack(self)->device())->enum_type(); + DeviceType device_type = ASSERT(PyTensor_Unpack(self)->device())->enum_type(); CHECK_OR_THROW(device_type == DeviceType::kCUDA) << "get_device is only available for GPU tensor."; return functional::CastToPyObject(ASSERT(PyTensor_Unpack(self)->device())->device_id()); @@ -312,7 +317,9 @@ static PyObject* PyTensorObject_argmax(PyObject* self, PyObject* args, PyObject* PyObjectPtr dict(PyDict_New()); CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "dim", dim) > -1); CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "keepdim", keepdim) > -1); - return functional::argmax(NULL, tuple.get(), dict.get()); + PyObject* result = functional::argmax(NULL, tuple.get(), dict.get()); + if (PyErr_Occurred()) { throw py::error_already_set(); } + return result; END_HANDLE_ERRORS } @@ -330,7 +337,9 @@ static PyObject* PyTensorObject_argmin(PyObject* self, PyObject* args, PyObject* PyObjectPtr dict(PyDict_New()); 
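      /* the dict built below carries inplace=True, asking the functional    \
         layer to reuse the lhs tensor; the result is still returned because \
         Python rebinds the target of `a += b` to this slot's return value */ \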
CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "dim", dim) > -1); CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "keepdim", keepdim) > -1); - return functional::argmin(NULL, tuple.get(), dict.get()); + PyObject* result = functional::argmin(NULL, tuple.get(), dict.get()); + if (PyErr_Occurred()) { throw py::error_already_set(); } + return result; END_HANDLE_ERRORS } @@ -348,7 +357,9 @@ static PyObject* PyTensorObject_amin(PyObject* self, PyObject* args, PyObject* k PyObjectPtr dict(PyDict_New()); CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "dim", dim) > -1); CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "keepdim", keepdim) > -1); - return functional::amin(NULL, tuple.get(), dict.get()); + PyObject* result = functional::amin(NULL, tuple.get(), dict.get()); + if (PyErr_Occurred()) { throw py::error_already_set(); } + return result; END_HANDLE_ERRORS } @@ -361,63 +372,39 @@ static PyObject* PyTensorObject_cast(PyObject* self, PyObject* args, PyObject* k return NULL; } PyObjectPtr tuple(PyTuple_Pack(2, self, dtype)); - return functional::cast(NULL, tuple.get(), NULL); + PyObject* result = functional::cast(NULL, tuple.get(), NULL); + if (PyErr_Occurred()) { throw py::error_already_set(); } + return result; END_HANDLE_ERRORS } static PyObject* PyTensorObject_diag(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS std::cout << "cpython" << std::endl; - PyObject* diagonal = PyLong_FromLong(0); + int32_t diagonal = 0; static const char* keywords[2] = {"diagonal", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:diag", const_cast(keywords), + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i:diag", const_cast(keywords), &diagonal)) { return NULL; } - PyObjectPtr tuple(PyTuple_Pack(2, self, diagonal)); - return functional::diag(NULL, tuple.get(), NULL); + return PyTensor_New(ASSERT_PTR(functional::Diag(PyTensor_Unpack(self), diagonal))); END_HANDLE_ERRORS } static PyObject* PyTensorObject_diagonal(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS std::cout << "cpython" << std::endl; - PyObject* offset = PyLong_FromLong(0); - PyObject* dim1 = PyLong_FromLong(0); - PyObject* dim2 = PyLong_FromLong(1); + int32_t offset = 0; + int32_t dim1 = 0; + int32_t dim2 = 1; static const char* keywords[4] = {"offset", "dim1", "dim2", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOO:diagonal", const_cast(keywords), + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|iii:diagonal", const_cast(keywords), &offset, &dim1, &dim2)) { return NULL; } - PyObjectPtr tuple(PyTuple_Pack(4, self, offset, dim1, dim2)); - return functional::diagonal(NULL, tuple.get(), NULL); - END_HANDLE_ERRORS -} - -// static PyObject* PyTensorObject_add(PyObject* self, PyObject* args, PyObject* kwargs) { -// HANDLE_ERRORS -// std::cout << "cpython" << std::endl; -// PyObject* other = NULL; -// PyObject* alpha = PyFloat_FromDouble(1.0); -// static const char* keywords[3] = {"other", "alpha", NULL}; -// if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|$O:add", const_cast(keywords), -// &other, &alpha)) { -// return NULL; -// } -// std::cout << "run1" << std::endl; -// if(other == NULL) -// std::cout << "other null" << std::endl; -// std::cout << "run2" << std::endl; -// PyObjectPtr tuple(PyTuple_Pack(3, self, other, alpha));//, other, alpha)); -// // PyObjectPtr dict(PyDict_New()); -// PyObject* dict = PyDict_New(); -// CHECK_OR_THROW(PyDict_SetItemString(dict, "alpha", alpha) > -1); -// CHECK_OR_THROW(PyDict_SetItemString(dict, "inplace", Py_False) > -1); -// std::cout << "run3" << 
std::endl; -// return functional::add(NULL, tuple.get(), dict); -// END_HANDLE_ERRORS -// } + return PyTensor_New(ASSERT_PTR(functional::Diagonal(PyTensor_Unpack(self), offset, dim1, dim2))); + END_HANDLE_ERRORS +} static PyObject* PyTensorObject_addcmul(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS @@ -433,7 +420,9 @@ static PyObject* PyTensorObject_addcmul(PyObject* self, PyObject* args, PyObject PyObjectPtr tuple(PyTuple_Pack(3, self, tensor1, tensor2)); PyObjectPtr dict(PyDict_New()); CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "value", value) > -1); - return functional::addcmul(NULL, tuple.get(), dict.get()); + PyObject* result = functional::addcmul(NULL, tuple.get(), dict.get()); + if (PyErr_Occurred()) { throw py::error_already_set(); } + return result; END_HANDLE_ERRORS } @@ -451,7 +440,9 @@ static PyObject* PyTensorObject_addcmul_(PyObject* self, PyObject* args, PyObjec PyObjectPtr tuple(PyTuple_Pack(3, self, tensor1, tensor2)); PyObjectPtr dict(PyDict_New()); CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "value", value) > -1); - return functional::addcmul_(NULL, tuple.get(), dict.get()); + PyObject* result = functional::addcmul_(NULL, tuple.get(), dict.get()); + if (PyErr_Occurred()) { throw py::error_already_set(); } + return result; END_HANDLE_ERRORS } @@ -466,7 +457,9 @@ static PyObject* PyTensorObject_sub_(PyObject* self, PyObject* args, PyObject* k PyObjectPtr tuple(PyTuple_Pack(2, self, other)); PyObjectPtr dict(PyDict_New()); CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "inplace", Py_True) > -1); - return functional::sub(NULL, tuple.get(), dict.get()); + PyObject* result = functional::sub(NULL, tuple.get(), dict.get()); + if (PyErr_Occurred()) { throw py::error_already_set(); } + return result; END_HANDLE_ERRORS } @@ -484,7 +477,9 @@ static PyObject* PyTensorObject_clamp(PyObject* self, PyObject* args, PyObject* PyObjectPtr dict(PyDict_New()); CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "min", min) > -1); CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "max", max) > -1); - return functional::clamp(NULL, tuple.get(), dict.get()); + PyObject* result = functional::clamp(NULL, tuple.get(), dict.get()); + if (PyErr_Occurred()) { throw py::error_already_set(); } + return result; END_HANDLE_ERRORS } @@ -502,7 +497,10 @@ static PyObject* PyTensorObject_clamp_(PyObject* self, PyObject* args, PyObject* PyObjectPtr dict(PyDict_New()); CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "min", min) > -1); CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "max", max) > -1); - return functional::clamp_(NULL, tuple.get(), dict.get()); + PyObject* result = functional::clamp_(NULL, tuple.get(), dict.get()); + if (PyErr_Occurred()) { throw py::error_already_set(); } + + return result; END_HANDLE_ERRORS } @@ -538,40 +536,36 @@ static PyObject* PyTensorObject_clip_(PyObject* self, PyObject* args, PyObject* PyObjectPtr dict(PyDict_New()); CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "min", min) > -1); CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "max", max) > -1); - return functional::clip_(NULL, tuple.get(), dict.get()); + PyObject* result = functional::clip_(NULL, tuple.get(), dict.get()); + if (PyErr_Occurred()) { throw py::error_already_set(); } + return result; END_HANDLE_ERRORS } static PyObject* PyTensorObject_cpu(PyObject* self, PyObject* unused) { HANDLE_ERRORS std::cout << "cpython" << std::endl; - PyObjectPtr dict(PyDict_New()); - PyObjectPtr device(PyUnicode_FromString("cpu")); - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "device", 
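+      /* (self, dim, index) were packed positionally into `tuple` above, so
+         no kwargs dict is passed here. */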
device.get()) > -1); - PyObjectPtr tuple(PyTuple_Pack(1, self)); - return functional::to(NULL, tuple.get(), dict.get()); + Optional device = "cpu"; + return PyTensor_New(ASSERT_PTR(functional::To(PyTensor_Unpack(self), device, NullOpt, false))); END_HANDLE_ERRORS } static PyObject* PyTensorObject_cuda(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS std::cout << "cpython" << std::endl; - PyObject* device = Py_None; + PyObject* device_obj = Py_None; + Optional device = ""; static const char* keywords[2] = {"device", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:cuda", const_cast(keywords), - &device)) { + &device_obj)) { return NULL; } - if (device == Py_None) - device = PyUnicode_FromString("cuda"); - else if (PyLong_Check(device)) { - device = PyUnicode_Concat(PyUnicode_FromString("cuda:"), - PyUnicode_FromFormat("%d", PyLong_AsLong(device))); + if (device_obj == Py_None) + device = "cuda"; + else if (PyLong_Check(device_obj)) { + device = "cuda:" + std::to_string(PyLong_AsLong(device_obj)); } - PyObjectPtr dict(PyDict_New()); - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "device", device) > -1); - PyObjectPtr tuple(PyTuple_Pack(1, self)); - return functional::to(NULL, tuple.get(), dict.get()); + return PyTensor_New(ASSERT_PTR(functional::To(PyTensor_Unpack(self), device, NullOpt, false))); END_HANDLE_ERRORS } @@ -585,7 +579,9 @@ static PyObject* PyTensorObject_in_top_k(PyObject* self, PyObject* args, PyObjec return NULL; } PyObjectPtr tuple(PyTuple_Pack(3, self, predictions, k)); - return functional::in_top_k(NULL, tuple.get(), NULL); + PyObject* result = functional::in_top_k(NULL, tuple.get(), NULL); + if (PyErr_Occurred()) { throw py::error_already_set(); } + return result; END_HANDLE_ERRORS } @@ -599,7 +595,9 @@ static PyObject* PyTensorObject_index_select(PyObject* self, PyObject* args, PyO return NULL; } PyObjectPtr tuple(PyTuple_Pack(3, self, dim, index)); - return functional::index_select(NULL, tuple.get(), NULL); + PyObject* result = functional::index_select(NULL, tuple.get(), NULL); + if (PyErr_Occurred()) { throw py::error_already_set(); } + return result; END_HANDLE_ERRORS } @@ -625,9 +623,9 @@ static PyObject* PyTensorObject_maximum(PyObject* self, PyObject* args, PyObject return NULL; } CHECK_OR_THROW(PyTensor_Check(y)) - << Error::TypeError() << "minimum(): argument 'other' must be tensor, not " + << Error::TypeError() << "maximum(): argument 'other' must be tensor, not " << functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(y))); - return PyTensor_New(ASSERT_PTR(functional::Minimum(PyTensor_Unpack(self), PyTensor_Unpack(y)))); + return PyTensor_New(ASSERT_PTR(functional::Maximum(PyTensor_Unpack(self), PyTensor_Unpack(y)))); END_HANDLE_ERRORS } @@ -639,7 +637,9 @@ static PyObject* PyTensorObject_pow(PyObject* self, PyObject* args, PyObject* kw return NULL; } PyObjectPtr tuple(PyTuple_Pack(2, self, b)); - return functional::pow(NULL, tuple.get(), NULL); + PyObject* result = functional::pow(NULL, tuple.get(), NULL); + if (PyErr_Occurred()) { throw py::error_already_set(); } + return result; END_HANDLE_ERRORS } @@ -658,7 +658,9 @@ static PyObject* PyTensorObject_var(PyObject* self, PyObject* args, PyObject* kw PyDict_SetItemString(dict.get(), "dim", dim); PyDict_SetItemString(dict.get(), "unbiased", unbiased); PyDict_SetItemString(dict.get(), "keepdim", keepdim); - return functional::var(NULL, tuple.get(), dict.get()); + PyObject* result = functional::var(NULL, tuple.get(), dict.get()); + if (PyErr_Occurred()) { throw 
py::error_already_set(); } + return result; END_HANDLE_ERRORS } @@ -677,7 +679,9 @@ static PyObject* PyTensorObject_std(PyObject* self, PyObject* args, PyObject* kw PyDict_SetItemString(dict.get(), "dim", dim); PyDict_SetItemString(dict.get(), "unbiased", unbiased); PyDict_SetItemString(dict.get(), "keepdim", keepdim); - return functional::std(NULL, tuple.get(), dict.get()); + PyObject* result = functional::std(NULL, tuple.get(), dict.get()); + if (PyErr_Occurred()) { throw py::error_already_set(); } + return result; END_HANDLE_ERRORS } @@ -727,7 +731,9 @@ static PyObject* PyTensorObject_norm(PyObject* self, PyObject* args, PyObject* k PyObjectPtr tuple(PyTuple_Pack(4, self, p, dim, keepdim)); PyObjectPtr dict(PyDict_New()); CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "dtype", dtype) > -1); - return functional::norm(NULL, tuple.get(), dict.get()); + PyObject* result = functional::norm(NULL, tuple.get(), dict.get()); + if (PyErr_Occurred()) { throw py::error_already_set(); } + return result; END_HANDLE_ERRORS } @@ -818,7 +824,9 @@ static PyObject* PyTensorObject_roll(PyObject* self, PyObject* args, PyObject* k return NULL; } PyObjectPtr tuple(PyTuple_Pack(3, self, shifts, dims)); - return functional::roll(NULL, tuple.get(), NULL); + PyObject* result = functional::roll(NULL, tuple.get(), NULL); + if (PyErr_Occurred()) { throw py::error_already_set(); } + return result; END_HANDLE_ERRORS } @@ -832,7 +840,9 @@ static PyObject* PyTensorObject_chunk(PyObject* self, PyObject* args, PyObject* return NULL; } PyObjectPtr tuple(PyTuple_Pack(3, self, chunks, dim)); - return functional::chunk(NULL, tuple.get(), NULL); + PyObject* result = functional::chunk(NULL, tuple.get(), NULL); + if (PyErr_Occurred()) { throw py::error_already_set(); } + return result; END_HANDLE_ERRORS } @@ -884,7 +894,7 @@ PyMethodDef PyTensorObject_extra_methods[] = { {"tril", (PyCFunction)PyTensorObject_tril, METH_VARARGS | METH_KEYWORDS, NULL}, {"triu", (PyCFunction)PyTensorObject_triu, METH_VARARGS | METH_KEYWORDS, NULL}, {"norm", (PyCFunction)PyTensorObject_norm, METH_VARARGS | METH_KEYWORDS, NULL}, - {"reshape", PyTensorObject_reshape, METH_VARARGS, NULL}, + {"reshape", (PyCFunction)PyTensorObject_reshape, METH_VARARGS | METH_KEYWORDS, NULL}, {"reshape_as", (PyCFunction)PyTensorObject_reshape_as, METH_VARARGS | METH_KEYWORDS, NULL}, {"relu", PyTensorObject_relu, METH_NOARGS, NULL}, {"relu_", PyTensorObject_relu_, METH_NOARGS, NULL}, From 400d43c5af899eb0d976ae5f7e0e66abc3e81142 Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Mon, 30 May 2022 15:16:51 +0800 Subject: [PATCH 16/50] refine code, fix typo --- oneflow/api/python/framework/tensor_functions.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index 23a9cf4498f..ae914159bc2 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -296,8 +296,8 @@ static PyObject* PyTensorObject_size(PyObject* self, PyObject* args, PyObject* k if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:size", const_cast(keywords), &idx)) { return NULL; } - Shape shape = *PyTensor_Unpack(self)->shape(); - PyObject* shape_object = TensorSize_NewFromShape(shape); + auto shape = PyTensor_Unpack(self)->shape(); + PyObject* shape_object = TensorSize_NewFromShape(*shape); if (idx == NULL || idx == Py_None) return shape_object; return shape_object->ob_type->tp_as_mapping->mp_subscript(shape_object, idx); 
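   /* Delegating to TensorSize's mp_subscript keeps size(idx) behaviorally
      identical to shape[idx], including negative indices and slices. */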
END_HANDLE_ERRORS @@ -528,7 +528,7 @@ static PyObject* PyTensorObject_clip_(PyObject* self, PyObject* args, PyObject* PyObject* min = Py_None; PyObject* max = Py_None; static const char* keywords[3] = {"min", "max", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO:clip", const_cast(keywords), &min, + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO:clip_", const_cast(keywords), &min, &max)) { return NULL; } From ebac1b014c02c1de68479d96d3b7d295d4c6f148 Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Mon, 30 May 2022 16:47:08 +0800 Subject: [PATCH 17/50] align .cuda to python --- .../api/python/framework/tensor_functions.cpp | 20 ++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index ae914159bc2..d90f712c8f7 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -554,18 +554,25 @@ static PyObject* PyTensorObject_cuda(PyObject* self, PyObject* args, PyObject* k HANDLE_ERRORS std::cout << "cpython" << std::endl; PyObject* device_obj = Py_None; - Optional device = ""; static const char* keywords[2] = {"device", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:cuda", const_cast(keywords), &device_obj)) { return NULL; } - if (device_obj == Py_None) - device = "cuda"; - else if (PyLong_Check(device_obj)) { - device = "cuda:" + std::to_string(PyLong_AsLong(device_obj)); + PyObjectPtr dict(PyDict_New()); + if (device_obj == Py_None) { + CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "device", PyUnicode_FromString("cuda")) > -1); + } else if (PyLong_Check(device_obj)) { + std::string device = "cuda:" + std::to_string(PyLong_AsLong(device_obj)); + CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "device", PyUnicode_FromString(device.data())) + > -1); + } else { + CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "device", device_obj) > -1); } - return PyTensor_New(ASSERT_PTR(functional::To(PyTensor_Unpack(self), device, NullOpt, false))); + PyObjectPtr tuple(PyTuple_Pack(1, self)); + PyObject* result = functional::to(NULL, tuple.get(), dict.get()); + if (PyErr_Occurred()) { throw py::error_already_set(); } + return result; END_HANDLE_ERRORS } @@ -931,7 +938,6 @@ PyMethodDef PyTensorObject_extra_methods[] = { {"cast", (PyCFunction)PyTensorObject_cast, METH_VARARGS | METH_KEYWORDS, NULL}, {"diag", (PyCFunction)PyTensorObject_diag, METH_VARARGS | METH_KEYWORDS, NULL}, {"diagonal", (PyCFunction)PyTensorObject_diagonal, METH_VARARGS | METH_KEYWORDS, NULL}, - // {"add", (PyCFunction)PyTensorObject_add, METH_VARARGS | METH_KEYWORDS, NULL}, {"addcmul", (PyCFunction)PyTensorObject_addcmul, METH_VARARGS | METH_KEYWORDS, NULL}, {"addcmul_", (PyCFunction)PyTensorObject_addcmul, METH_VARARGS | METH_KEYWORDS, NULL}, {"sub_", (PyCFunction)PyTensorObject_sub_, METH_VARARGS | METH_KEYWORDS, NULL}, From 0e03a6028ed808b909ce08cad820482aa6bf43db Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Mon, 30 May 2022 17:15:37 +0800 Subject: [PATCH 18/50] refine code --- oneflow/api/python/framework/tensor_functions.cpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index d90f712c8f7..877520aeb90 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -15,13 +15,10 @@ limitations under the License. 
*/ #include -#include -#include #include "oneflow/api/python/exception/exception.h" #include "oneflow/api/python/framework/size.h" #include "oneflow/api/python/functional/common.h" #include "oneflow/api/python/functional/functional_api.yaml.pybind.h" -#include "oneflow/core/common/device_type.pb.h" #include "oneflow/core/common/shape_vec.h" #include "oneflow/core/functional/functional.h" #include "oneflow/core/common/shape.h" @@ -243,8 +240,6 @@ BINARY_METHOD(PyTensorObject_div_, functional::div_, "div_"); BINARY_METHOD(PyTensorObject_mul, functional::mul, "mul"); BINARY_METHOD(PyTensorObject_mul_, functional::mul_, "mul_"); BINARY_METHOD(PyTensorObject_sub, functional::sub, "sub"); -// TODO: not implemented yet -// BINARY_METHOD(PyTensorObject_sub, functional::sub, "sub_"); BINARY_METHOD(PyTensorObject_fmod, functional::fmod, "fmod"); BINARY_METHOD(PyTensorObject_matmul, functional::matmul, "matmul"); BINARY_METHOD(PyTensorObject_logical_and, functional::logical_and, "logical_and"); From 4b0f22766f9805b380951d1d66819605339accd9 Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Mon, 30 May 2022 17:24:42 +0800 Subject: [PATCH 19/50] split some api to part3 for review --- .../api/python/framework/tensor_functions.cpp | 383 ------------------ python/oneflow/framework/tensor.py | 42 +- 2 files changed, 21 insertions(+), 404 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index 877520aeb90..234e7dffa9a 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -458,287 +458,6 @@ static PyObject* PyTensorObject_sub_(PyObject* self, PyObject* args, PyObject* k END_HANDLE_ERRORS } -static PyObject* PyTensorObject_clamp(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - std::cout << "cpython" << std::endl; - PyObject* min = Py_None; - PyObject* max = Py_None; - static const char* keywords[3] = {"min", "max", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO:clamp", const_cast(keywords), &min, - &max)) { - return NULL; - } - PyObjectPtr tuple(PyTuple_Pack(1, self)); - PyObjectPtr dict(PyDict_New()); - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "min", min) > -1); - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "max", max) > -1); - PyObject* result = functional::clamp(NULL, tuple.get(), dict.get()); - if (PyErr_Occurred()) { throw py::error_already_set(); } - return result; - END_HANDLE_ERRORS -} - -static PyObject* PyTensorObject_clamp_(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - std::cout << "cpython" << std::endl; - PyObject* min = Py_None; - PyObject* max = Py_None; - static const char* keywords[3] = {"min", "max", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO:clamp_", const_cast(keywords), &min, - &max)) { - return NULL; - } - PyObjectPtr tuple(PyTuple_Pack(1, self)); - PyObjectPtr dict(PyDict_New()); - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "min", min) > -1); - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "max", max) > -1); - PyObject* result = functional::clamp_(NULL, tuple.get(), dict.get()); - if (PyErr_Occurred()) { throw py::error_already_set(); } - - return result; - END_HANDLE_ERRORS -} - -static PyObject* PyTensorObject_clip(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - std::cout << "cpython" << std::endl; - PyObject* min = Py_None; - PyObject* max = Py_None; - static const char* keywords[3] = {"min", "max", NULL}; - if 
(!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO:clip", const_cast(keywords), &min, - &max)) { - return NULL; - } - PyObjectPtr tuple(PyTuple_Pack(1, self)); - PyObjectPtr dict(PyDict_New()); - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "min", min) > -1); - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "max", max) > -1); - return functional::clip(NULL, tuple.get(), dict.get()); - END_HANDLE_ERRORS -} - -static PyObject* PyTensorObject_clip_(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - std::cout << "cpython" << std::endl; - PyObject* min = Py_None; - PyObject* max = Py_None; - static const char* keywords[3] = {"min", "max", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO:clip_", const_cast(keywords), &min, - &max)) { - return NULL; - } - PyObjectPtr tuple(PyTuple_Pack(1, self)); - PyObjectPtr dict(PyDict_New()); - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "min", min) > -1); - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "max", max) > -1); - PyObject* result = functional::clip_(NULL, tuple.get(), dict.get()); - if (PyErr_Occurred()) { throw py::error_already_set(); } - return result; - END_HANDLE_ERRORS -} - -static PyObject* PyTensorObject_cpu(PyObject* self, PyObject* unused) { - HANDLE_ERRORS - std::cout << "cpython" << std::endl; - Optional device = "cpu"; - return PyTensor_New(ASSERT_PTR(functional::To(PyTensor_Unpack(self), device, NullOpt, false))); - END_HANDLE_ERRORS -} - -static PyObject* PyTensorObject_cuda(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - std::cout << "cpython" << std::endl; - PyObject* device_obj = Py_None; - static const char* keywords[2] = {"device", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:cuda", const_cast(keywords), - &device_obj)) { - return NULL; - } - PyObjectPtr dict(PyDict_New()); - if (device_obj == Py_None) { - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "device", PyUnicode_FromString("cuda")) > -1); - } else if (PyLong_Check(device_obj)) { - std::string device = "cuda:" + std::to_string(PyLong_AsLong(device_obj)); - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "device", PyUnicode_FromString(device.data())) - > -1); - } else { - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "device", device_obj) > -1); - } - PyObjectPtr tuple(PyTuple_Pack(1, self)); - PyObject* result = functional::to(NULL, tuple.get(), dict.get()); - if (PyErr_Occurred()) { throw py::error_already_set(); } - return result; - END_HANDLE_ERRORS -} - -static PyObject* PyTensorObject_in_top_k(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - PyObject* predictions = NULL; - PyObject* k = NULL; - static const char* keywords[3] = {"predictions", "k", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO:in_top_k", const_cast(keywords), - &predictions, &k)) { - return NULL; - } - PyObjectPtr tuple(PyTuple_Pack(3, self, predictions, k)); - PyObject* result = functional::in_top_k(NULL, tuple.get(), NULL); - if (PyErr_Occurred()) { throw py::error_already_set(); } - return result; - END_HANDLE_ERRORS -} - -static PyObject* PyTensorObject_index_select(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - PyObject* dim = NULL; - PyObject* index = NULL; - static const char* keywords[3] = {"dim", "index", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO:index_select", const_cast(keywords), - &dim, &index)) { - return NULL; - } - PyObjectPtr tuple(PyTuple_Pack(3, self, dim, index)); - PyObject* result = functional::index_select(NULL, tuple.get(), 
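   /* this block is only relocating: per the commit subject, these wrappers
      (clamp/clip, cpu/cuda, in_top_k, index_select, ...) move to a separate
      "part3" change for review, and the Python fallbacks in tensor.py are
      re-enabled in the meantime */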
NULL); - if (PyErr_Occurred()) { throw py::error_already_set(); } - return result; - END_HANDLE_ERRORS -} - -static PyObject* PyTensorObject_minimum(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - PyObject* y = NULL; - static const char* keywords[2] = {"y", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O:minimum", const_cast(keywords), &y)) { - return NULL; - } - CHECK_OR_THROW(PyTensor_Check(y)) - << Error::TypeError() << "minimum(): argument 'other' must be tensor, not " - << functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(y))); - return PyTensor_New(ASSERT_PTR(functional::Minimum(PyTensor_Unpack(self), PyTensor_Unpack(y)))); - END_HANDLE_ERRORS -} - -static PyObject* PyTensorObject_maximum(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - PyObject* y = NULL; - static const char* keywords[2] = {"y", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O:maximum", const_cast(keywords), &y)) { - return NULL; - } - CHECK_OR_THROW(PyTensor_Check(y)) - << Error::TypeError() << "maximum(): argument 'other' must be tensor, not " - << functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(y))); - return PyTensor_New(ASSERT_PTR(functional::Maximum(PyTensor_Unpack(self), PyTensor_Unpack(y)))); - END_HANDLE_ERRORS -} - -static PyObject* PyTensorObject_pow(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - PyObject* b = NULL; - static const char* keywords[2] = {"b", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O:pow", const_cast(keywords), &b)) { - return NULL; - } - PyObjectPtr tuple(PyTuple_Pack(2, self, b)); - PyObject* result = functional::pow(NULL, tuple.get(), NULL); - if (PyErr_Occurred()) { throw py::error_already_set(); } - return result; - END_HANDLE_ERRORS -} - -static PyObject* PyTensorObject_var(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - PyObject* dim = Py_None; - PyObject* unbiased = Py_True; - PyObject* keepdim = Py_False; - static const char* keywords[4] = {"dim", "unbiased", "keepdim", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOO:var", const_cast(keywords), &dim, - &unbiased, &keepdim)) { - return NULL; - } - PyObjectPtr tuple(PyTuple_Pack(1, self)); - PyObjectPtr dict(PyDict_New()); - PyDict_SetItemString(dict.get(), "dim", dim); - PyDict_SetItemString(dict.get(), "unbiased", unbiased); - PyDict_SetItemString(dict.get(), "keepdim", keepdim); - PyObject* result = functional::var(NULL, tuple.get(), dict.get()); - if (PyErr_Occurred()) { throw py::error_already_set(); } - return result; - END_HANDLE_ERRORS -} - -static PyObject* PyTensorObject_std(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - PyObject* dim = Py_None; - PyObject* unbiased = Py_True; - PyObject* keepdim = Py_False; - static const char* keywords[4] = {"dim", "unbiased", "keepdim", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOO:std", const_cast(keywords), &dim, - &unbiased, &keepdim)) { - return NULL; - } - PyObjectPtr tuple(PyTuple_Pack(1, self)); - PyObjectPtr dict(PyDict_New()); - PyDict_SetItemString(dict.get(), "dim", dim); - PyDict_SetItemString(dict.get(), "unbiased", unbiased); - PyDict_SetItemString(dict.get(), "keepdim", keepdim); - PyObject* result = functional::std(NULL, tuple.get(), dict.get()); - if (PyErr_Occurred()) { throw py::error_already_set(); } - return result; - END_HANDLE_ERRORS -} - -static PyObject* PyTensorObject_tril(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - std::cout << "cpython" 
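   /* the bare "cpython" couts were apparently temporary markers proving that
      dispatch reached the C path rather than the old Python method; they are
      dropped as each wrapper is finalized or moved out */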
<< std::endl; - PyObject* diagonal = PyLong_FromLong(0); - static const char* keywords[4] = {"diagonal", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:tril", const_cast(keywords), - &diagonal)) { - return NULL; - } - CHECK_OR_THROW(PyLong_Check(diagonal)) - << Error::TypeError() << "tril(): argument 'diagonal' must be int64, not " - << functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(diagonal))); - return PyTensor_New(ASSERT_PTR(functional::Tril(PyTensor_Unpack(self), PyLong_AsLong(diagonal)))); - END_HANDLE_ERRORS -} - -static PyObject* PyTensorObject_triu(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - std::cout << "cpython" << std::endl; - PyObject* diagonal = PyLong_FromLong(0); - static const char* keywords[4] = {"diagonal", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:triu", const_cast(keywords), - &diagonal)) { - return NULL; - } - CHECK_OR_THROW(PyLong_Check(diagonal)) - << Error::TypeError() << "triu(): argument 'diagonal' must be int64, not " - << functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(diagonal))); - return PyTensor_New(ASSERT_PTR(functional::Triu(PyTensor_Unpack(self), PyLong_AsLong(diagonal)))); - END_HANDLE_ERRORS -} - -static PyObject* PyTensorObject_norm(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - PyObject* p = Py_None; - PyObject* dim = Py_None; - PyObject* keepdim = Py_False; - PyObject* dtype = Py_None; - static const char* keywords[5] = {"p", "dim", "keepdim", "dtype", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOOO:norm", const_cast(keywords), &p, - &dim, &keepdim, &dtype)) { - return NULL; - } - PyObjectPtr tuple(PyTuple_Pack(4, self, p, dim, keepdim)); - PyObjectPtr dict(PyDict_New()); - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "dtype", dtype) > -1); - PyObject* result = functional::norm(NULL, tuple.get(), dict.get()); - if (PyErr_Occurred()) { throw py::error_already_set(); } - return result; - END_HANDLE_ERRORS -} - static PyObject* PyTensorObject_reshape(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS PyObject* shape = args; @@ -767,86 +486,6 @@ static PyObject* PyTensorObject_reshape_as(PyObject* self, PyObject* args, PyObj END_HANDLE_ERRORS } -static PyObject* PyTensorObject_relu(PyObject* self, PyObject* unused) { - HANDLE_ERRORS - auto tensor = PyTensor_Unpack(self); - return PyTensor_New(ASSERT_PTR(functional::Relu(tensor, false))); - END_HANDLE_ERRORS -} - -static PyObject* PyTensorObject_relu_(PyObject* self, PyObject* unused) { - HANDLE_ERRORS - auto tensor = PyTensor_Unpack(self); - return PyTensor_New(ASSERT_PTR(functional::Relu(tensor, true))); - END_HANDLE_ERRORS -} - -static PyObject* PyTensorObject_softmax(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - auto tensor = PyTensor_Unpack(self); - PyObject* dim = Py_None; - static const char* keywords[2] = {"dim", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:softmax", const_cast(keywords), - &dim)) { - return NULL; - } - if (dim == Py_None) return PyTensor_New(ASSERT_PTR(functional::Softmax(tensor, NullOpt))); - CHECK_OR_THROW(PyLong_Check(dim)) - << Error::TypeError() << "softmax(): argument 'dim' must be int64, not " - << functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(dim))); - return PyTensor_New(ASSERT_PTR(functional::Softmax(tensor, PyLong_AsLong(dim)))); - END_HANDLE_ERRORS -} - -static PyObject* PyTensorObject_log_softmax(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - auto tensor 
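   /* PyTensor_Unpack pulls the underlying Tensor handle (a shared_ptr) out
      of the Python wrapper object; wrappers that call the C++ functional
      API directly all start this way */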
= PyTensor_Unpack(self); - PyObject* dim = Py_None; - static const char* keywords[2] = {"dim", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:log_softmax", const_cast(keywords), - &dim)) { - return NULL; - } - if (dim == Py_None) return PyTensor_New(ASSERT_PTR(functional::LogSoftmax(tensor, NullOpt))); - CHECK_OR_THROW(PyLong_Check(dim)) - << Error::TypeError() << "log_softmax(): argument 'dim' must be int64, not " - << functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(dim))); - return PyTensor_New(ASSERT_PTR(functional::LogSoftmax(tensor, PyLong_AsLong(dim)))); - END_HANDLE_ERRORS -} - -static PyObject* PyTensorObject_roll(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - PyObject* shifts = NULL; - PyObject* dims = Py_None; - static const char* keywords[3] = {"shifts", "dims", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|O:roll", const_cast(keywords), &shifts, - &dims)) { - return NULL; - } - PyObjectPtr tuple(PyTuple_Pack(3, self, shifts, dims)); - PyObject* result = functional::roll(NULL, tuple.get(), NULL); - if (PyErr_Occurred()) { throw py::error_already_set(); } - return result; - END_HANDLE_ERRORS -} - -static PyObject* PyTensorObject_chunk(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - PyObject* chunks = Py_None; - PyObject* dim = Py_None; - static const char* keywords[3] = {"chunks", "dim", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO:chunk", const_cast(keywords), &chunks, - &dim)) { - return NULL; - } - PyObjectPtr tuple(PyTuple_Pack(3, self, chunks, dim)); - PyObject* result = functional::chunk(NULL, tuple.get(), NULL); - if (PyErr_Occurred()) { throw py::error_already_set(); } - return result; - END_HANDLE_ERRORS -} - #define DATATYPE_FUNC(func_name, dtype) \ static PyObject* func_name(PyObject* self, PyObject* unused) { \ HANDLE_ERRORS \ @@ -936,28 +575,6 @@ PyMethodDef PyTensorObject_extra_methods[] = { {"addcmul", (PyCFunction)PyTensorObject_addcmul, METH_VARARGS | METH_KEYWORDS, NULL}, {"addcmul_", (PyCFunction)PyTensorObject_addcmul, METH_VARARGS | METH_KEYWORDS, NULL}, {"sub_", (PyCFunction)PyTensorObject_sub_, METH_VARARGS | METH_KEYWORDS, NULL}, - {"clamp", (PyCFunction)PyTensorObject_clamp, METH_VARARGS | METH_KEYWORDS, NULL}, - {"clamp_", (PyCFunction)PyTensorObject_clamp_, METH_VARARGS | METH_KEYWORDS, NULL}, - {"clip", (PyCFunction)PyTensorObject_clip, METH_VARARGS | METH_KEYWORDS, NULL}, - {"clip_", (PyCFunction)PyTensorObject_clip_, METH_VARARGS | METH_KEYWORDS, NULL}, - {"cpu", PyTensorObject_cpu, METH_NOARGS, NULL}, - {"cuda", (PyCFunction)PyTensorObject_cuda, METH_VARARGS | METH_KEYWORDS, NULL}, - {"in_top_k", (PyCFunction)PyTensorObject_in_top_k, METH_VARARGS | METH_KEYWORDS, NULL}, - {"index_select", (PyCFunction)PyTensorObject_index_select, METH_VARARGS | METH_KEYWORDS, NULL}, - {"minimum", (PyCFunction)PyTensorObject_minimum, METH_VARARGS | METH_KEYWORDS, NULL}, - {"maximum", (PyCFunction)PyTensorObject_maximum, METH_VARARGS | METH_KEYWORDS, NULL}, - {"pow", (PyCFunction)PyTensorObject_pow, METH_VARARGS | METH_KEYWORDS, NULL}, - {"var", (PyCFunction)PyTensorObject_var, METH_VARARGS | METH_KEYWORDS, NULL}, - {"std", (PyCFunction)PyTensorObject_std, METH_VARARGS | METH_KEYWORDS, NULL}, - {"tril", (PyCFunction)PyTensorObject_tril, METH_VARARGS | METH_KEYWORDS, NULL}, - {"triu", (PyCFunction)PyTensorObject_triu, METH_VARARGS | METH_KEYWORDS, NULL}, - {"norm", (PyCFunction)PyTensorObject_norm, METH_VARARGS | METH_KEYWORDS, NULL}, - {"relu", PyTensorObject_relu, 
METH_NOARGS, NULL}, - {"relu_", PyTensorObject_relu_, METH_NOARGS, NULL}, - {"softmax", (PyCFunction)PyTensorObject_softmax, METH_VARARGS | METH_KEYWORDS, NULL}, - {"log_softmax", (PyCFunction)PyTensorObject_log_softmax, METH_VARARGS | METH_KEYWORDS, NULL}, - {"roll", (PyCFunction)PyTensorObject_roll, METH_VARARGS | METH_KEYWORDS, NULL}, - {"chunk", (PyCFunction)PyTensorObject_chunk, METH_VARARGS | METH_KEYWORDS, NULL}, {"int", PyTensorObject_int, METH_NOARGS, NULL}, {"long", PyTensorObject_long, METH_NOARGS, NULL}, {"float", PyTensorObject_float, METH_NOARGS, NULL}, diff --git a/python/oneflow/framework/tensor.py b/python/oneflow/framework/tensor.py index 5f6f1b98c0d..9b67f1d137c 100755 --- a/python/oneflow/framework/tensor.py +++ b/python/oneflow/framework/tensor.py @@ -1188,47 +1188,47 @@ def RegisterMethods(): # Tensor.mul_ = _mul_ # Tensor.sub = _sub # Tensor.sub_ = _sub_inplace - # Tensor.clamp = _clamp - # Tensor.clamp_ = _clamp_ - # Tensor.clip = _clip - # Tensor.clip_ = _clip_ - # Tensor.cpu = _cpu - # Tensor.cuda = _cuda + Tensor.clamp = _clamp + Tensor.clamp_ = _clamp_ + Tensor.clip = _clip + Tensor.clip_ = _clip_ + Tensor.cpu = _cpu + Tensor.cuda = _cuda Tensor.expand = _expand Tensor.expand_as = _expand_as # Tensor.fmod = _fmod Tensor.flatten = _flatten Tensor.flip = _flip - # Tensor.in_top_k = _in_top_k - # Tensor.index_select = _index_select - # Tensor.minimum = _minimum - # Tensor.maximum = _maximum + Tensor.in_top_k = _in_top_k + Tensor.index_select = _index_select + Tensor.minimum = _minimum + Tensor.maximum = _maximum Tensor.new_empty = _new_empty Tensor.new_ones = _new_ones Tensor.new_zeros = _new_zeros - # Tensor.pow = _pow - # Tensor.var = _var - # Tensor.std = _std + Tensor.pow = _pow + Tensor.var = _var + Tensor.std = _std # Tensor.matmul = _matmul Tensor.softplus = _softplus - # Tensor.tril = _tril - # Tensor.triu = _triu + Tensor.tril = _tril + Tensor.triu = _triu Tensor.where = _where # Tensor.norm = _norm Tensor.local_to_global = _local_to_global Tensor.global_to_global = _global_to_global Tensor.to_global = _to_global - # Tensor.relu = _relu - # Tensor.relu_ = _relu_inplace - # Tensor.softmax = _softmax - # Tensor.log_softmax = _log_softmax + Tensor.relu = _relu + Tensor.relu_ = _relu_inplace + Tensor.softmax = _softmax + Tensor.log_softmax = _log_softmax # Tensor.logical_and = _and # Tensor.logical_or = _or # Tensor.logical_not = _not # Tensor.logical_xor = _xor - # Tensor.roll = _roll + Tensor.roll = _roll # Tensor.bmm = _bmm - # Tensor.chunk = _chunk + Tensor.chunk = _chunk Tensor.repeat = _repeat Tensor.tile = _tile Tensor.split = _split From 6ff364865ceeb03fdc78e50da0b978cfefcdf8b5 Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Mon, 30 May 2022 21:41:25 +0800 Subject: [PATCH 20/50] remove positional only arguments of argmax and argmin --- oneflow/core/functional/functional_api.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/oneflow/core/functional/functional_api.yaml b/oneflow/core/functional/functional_api.yaml index ade808de699..4976974baa1 100755 --- a/oneflow/core/functional/functional_api.yaml +++ b/oneflow/core/functional/functional_api.yaml @@ -713,11 +713,11 @@ bind_python: True - name: "argmax" - signature: "Tensor (Tensor x, *, Int32 dim=None, Bool keepdim=None, DataType dtype=None) => ArgMax" + signature: "Tensor (Tensor x, Int32 dim=None, Bool keepdim=None, DataType dtype=None) => ArgMax" bind_python: True - name: "argmin" - signature: "Tensor (Tensor x, *, Int32 dim=None, Bool keepdim=None, DataType dtype=None) => 
ArgMin" + signature: "Tensor (Tensor x, Int32 dim=None, Bool keepdim=None, DataType dtype=None) => ArgMin" bind_python: True - name: "argwhere" From 29c4b34139b991d0b943003152a143142c99fdb6 Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Mon, 30 May 2022 21:41:51 +0800 Subject: [PATCH 21/50] remove arguments parse --- .../api/python/framework/tensor_functions.cpp | 109 +++++------------- 1 file changed, 27 insertions(+), 82 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index 234e7dffa9a..5b72b7eb65e 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -15,11 +15,15 @@ limitations under the License. */ #include +#include +#include +#include #include "oneflow/api/python/exception/exception.h" #include "oneflow/api/python/framework/size.h" #include "oneflow/api/python/functional/common.h" #include "oneflow/api/python/functional/functional_api.yaml.pybind.h" #include "oneflow/core/common/shape_vec.h" +#include "oneflow/core/common/throw.h" #include "oneflow/core/functional/functional.h" #include "oneflow/core/common/shape.h" @@ -31,6 +35,13 @@ namespace one { using functional::PyObjectPtr; +static PyObject* concat_self(PyObject* self, PyObject* args) { + PyObjectPtr self_tuple(PyTuple_Pack(1, self)); + PyObject* tuple = PySequence_Concat(self_tuple.get(), args); + CHECK_OR_THROW(tuple != NULL); + return tuple; +} + #define NB_UNARY_FUNC(func_name, bind_func) \ static PyObject* func_name(PyObject* self) { \ HANDLE_ERRORS \ @@ -68,7 +79,7 @@ NB_BINARY_FUNC(PyTensorObject_nb_floor_div, functional::floor_divide); NB_BINARY_FUNC(PyTensorObject_nb_true_div, functional::div); NB_BINARY_FUNC(PyTensorObject_nb_matrix_multiply, functional::matmul); -PyObject* PyTensorObject_nb_pow(PyObject* a, PyObject* b, PyObject* unsed) { +static PyObject* PyTensorObject_nb_pow(PyObject* a, PyObject* b, PyObject* unsed) { HANDLE_ERRORS PyObjectPtr tuple(PyTuple_Pack(2, a, b)); auto* result = functional::pow(NULL, tuple.get(), NULL); @@ -217,7 +228,6 @@ UNARY_METHOD(PyTensorObject_logical_not, functional::LogicalNot); #define BINARY_METHOD(func_name, bind_func, name) \ static PyObject* func_name(PyObject* self, PyObject* args, PyObject* kwargs) { \ HANDLE_ERRORS \ - std::cout << "cpython" << std::endl; \ PyObject* other = NULL; \ static const char* keywords[2] = {"other", NULL}; \ if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O:" name, const_cast(keywords), \ @@ -285,7 +295,6 @@ static PyObject* PyTensorObject_get_device(PyObject* self, PyObject* unused) { static PyObject* PyTensorObject_size(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS - std::cout << "cpython size" << std::endl; PyObject* idx = Py_None; static const char* keywords[2] = {"idx", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:size", const_cast(keywords), &idx)) { @@ -300,19 +309,8 @@ static PyObject* PyTensorObject_size(PyObject* self, PyObject* args, PyObject* k static PyObject* PyTensorObject_argmax(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS - std::cout << "cpython argmax" << std::endl; - PyObject* dim = Py_None; - PyObject* keepdim = Py_None; - static const char* keywords[3] = {"dim", "keepdim", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO:argmax", const_cast(keywords), &dim, - &keepdim)) { - return NULL; - } - PyObjectPtr tuple(PyTuple_Pack(1, self)); - PyObjectPtr dict(PyDict_New()); - 
CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "dim", dim) > -1); - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "keepdim", keepdim) > -1); - PyObject* result = functional::argmax(NULL, tuple.get(), dict.get()); + PyObjectPtr concat_args(concat_self(self, args)); + PyObject* result = functional::argmax(NULL, concat_args.get(), kwargs); if (PyErr_Occurred()) { throw py::error_already_set(); } return result; END_HANDLE_ERRORS @@ -320,19 +318,8 @@ static PyObject* PyTensorObject_argmax(PyObject* self, PyObject* args, PyObject* static PyObject* PyTensorObject_argmin(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS - std::cout << "cpython argmax" << std::endl; - PyObject* dim = Py_None; - PyObject* keepdim = Py_None; - static const char* keywords[3] = {"dim", "keepdim", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO:argmin", const_cast(keywords), &dim, - &keepdim)) { - return NULL; - } - PyObjectPtr tuple(PyTuple_Pack(1, self)); - PyObjectPtr dict(PyDict_New()); - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "dim", dim) > -1); - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "keepdim", keepdim) > -1); - PyObject* result = functional::argmin(NULL, tuple.get(), dict.get()); + PyObjectPtr concat_args(concat_self(self, args)); + PyObject* result = functional::argmin(NULL, concat_args.get(), kwargs); if (PyErr_Occurred()) { throw py::error_already_set(); } return result; END_HANDLE_ERRORS @@ -341,18 +328,8 @@ static PyObject* PyTensorObject_argmin(PyObject* self, PyObject* args, PyObject* static PyObject* PyTensorObject_amin(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS std::cout << "cpython amin" << std::endl; - PyObject* dim = Py_None; - PyObject* keepdim = Py_None; - static const char* keywords[3] = {"dim", "keepdim", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO:amin", const_cast(keywords), &dim, - &keepdim)) { - return NULL; - } - PyObjectPtr tuple(PyTuple_Pack(1, self)); - PyObjectPtr dict(PyDict_New()); - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "dim", dim) > -1); - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "keepdim", keepdim) > -1); - PyObject* result = functional::amin(NULL, tuple.get(), dict.get()); + PyObjectPtr concat_args(concat_self(self, args)); + PyObject* result = functional::amin(NULL, concat_args.get(), kwargs); if (PyErr_Occurred()) { throw py::error_already_set(); } return result; END_HANDLE_ERRORS @@ -360,14 +337,8 @@ static PyObject* PyTensorObject_amin(PyObject* self, PyObject* args, PyObject* k static PyObject* PyTensorObject_cast(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS - std::cout << "cpython" << std::endl; - PyObject* dtype = NULL; - static const char* keywords[2] = {"dtype", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O:cast", const_cast(keywords), &dtype)) { - return NULL; - } - PyObjectPtr tuple(PyTuple_Pack(2, self, dtype)); - PyObject* result = functional::cast(NULL, tuple.get(), NULL); + PyObjectPtr concat_args(concat_self(self, args)); + PyObject* result = functional::cast(NULL, concat_args.get(), kwargs); if (PyErr_Occurred()) { throw py::error_already_set(); } return result; END_HANDLE_ERRORS @@ -404,18 +375,8 @@ static PyObject* PyTensorObject_diagonal(PyObject* self, PyObject* args, PyObjec static PyObject* PyTensorObject_addcmul(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS std::cout << "cpython" << std::endl; - PyObject* tensor1 = NULL; - PyObject* tensor2 = NULL; - PyObject* value = PyFloat_FromDouble(1.0); - static 
const char* keywords[4] = {"tensor1", "tensor2", "value", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|$O:addcmul", const_cast(keywords), - &tensor1, &tensor2, &value)) { - return NULL; - } - PyObjectPtr tuple(PyTuple_Pack(3, self, tensor1, tensor2)); - PyObjectPtr dict(PyDict_New()); - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "value", value) > -1); - PyObject* result = functional::addcmul(NULL, tuple.get(), dict.get()); + PyObjectPtr concat_args(concat_self(self, args)); + PyObject* result = functional::addcmul(NULL, concat_args.get(), kwargs); if (PyErr_Occurred()) { throw py::error_already_set(); } return result; END_HANDLE_ERRORS @@ -424,18 +385,8 @@ static PyObject* PyTensorObject_addcmul(PyObject* self, PyObject* args, PyObject static PyObject* PyTensorObject_addcmul_(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS std::cout << "cpython" << std::endl; - PyObject* tensor1 = NULL; - PyObject* tensor2 = NULL; - PyObject* value = PyFloat_FromDouble(1.0); - static const char* keywords[4] = {"tensor1", "tensor2", "value", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|$O:addcmul_", const_cast(keywords), - &tensor1, &tensor2, &value)) { - return NULL; - } - PyObjectPtr tuple(PyTuple_Pack(3, self, tensor1, tensor2)); - PyObjectPtr dict(PyDict_New()); - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "value", value) > -1); - PyObject* result = functional::addcmul_(NULL, tuple.get(), dict.get()); + PyObjectPtr concat_args(concat_self(self, args)); + PyObject* result = functional::addcmul_(NULL, concat_args.get(), kwargs); if (PyErr_Occurred()) { throw py::error_already_set(); } return result; END_HANDLE_ERRORS @@ -444,15 +395,9 @@ static PyObject* PyTensorObject_addcmul_(PyObject* self, PyObject* args, PyObjec static PyObject* PyTensorObject_sub_(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS std::cout << "cpython" << std::endl; - PyObject* other = NULL; - static const char* keywords[2] = {"other", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O:sub_", const_cast(keywords), &other)) { - return NULL; - } - PyObjectPtr tuple(PyTuple_Pack(2, self, other)); - PyObjectPtr dict(PyDict_New()); - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "inplace", Py_True) > -1); - PyObject* result = functional::sub(NULL, tuple.get(), dict.get()); + if (kwargs != NULL) { CHECK_OR_THROW(PyDict_SetItemString(kwargs, "inplace", Py_True) > -1); } + PyObjectPtr concat_args(concat_self(self, args)); + PyObject* result = functional::sub(NULL, concat_args.get(), kwargs); if (PyErr_Occurred()) { throw py::error_already_set(); } return result; END_HANDLE_ERRORS From 0b9b0b63f885e9455d7c3a6aa5f4ea0776f805d1 Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Tue, 31 May 2022 09:32:25 +0800 Subject: [PATCH 22/50] modify arguments name in matmul and floor_divide --- .../api/python/framework/tensor_functions.cpp | 37 +++++++++---------- oneflow/core/functional/functional_api.yaml | 8 ++-- 2 files changed, 22 insertions(+), 23 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index 5b72b7eb65e..5a6e29cc7a7 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -15,15 +15,12 @@ limitations under the License. 
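The pattern patch 21 converges on is worth spelling out. Instead of re-parsing dim, keepdim, value and friends in hand-written C, each method now prepends self to the positional arguments via concat_self and hands the tuple plus the untouched kwargs to the generated functional binding, which owns the real signature. A rough Python sketch of the dispatch; functional_argmax stands in for the generated pybind function and is illustrative, not a real name:

    def concat_self(self, args):
        # mirror of the C helper: prepend the tensor to the positional args
        return (self,) + tuple(args)

    def argmax_method(self, *args, **kwargs):
        # no parsing here; the functional layer validates dim/keepdim/dtype
        return functional_argmax(*concat_self(self, args), **kwargs)

For the in-place variants (sub_ here, add_ later in the series) the only extra step is forcing inplace=True into the kwargs before forwarding.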
*/ #include -#include #include -#include #include "oneflow/api/python/exception/exception.h" #include "oneflow/api/python/framework/size.h" #include "oneflow/api/python/functional/common.h" #include "oneflow/api/python/functional/functional_api.yaml.pybind.h" #include "oneflow/core/common/shape_vec.h" -#include "oneflow/core/common/throw.h" #include "oneflow/core/functional/functional.h" #include "oneflow/core/common/shape.h" @@ -225,20 +222,15 @@ UNARY_METHOD(PyTensorObject_tanh, functional::Tanh); UNARY_METHOD(PyTensorObject_atanh, functional::Atanh); UNARY_METHOD(PyTensorObject_logical_not, functional::LogicalNot); -#define BINARY_METHOD(func_name, bind_func, name) \ - static PyObject* func_name(PyObject* self, PyObject* args, PyObject* kwargs) { \ - HANDLE_ERRORS \ - PyObject* other = NULL; \ - static const char* keywords[2] = {"other", NULL}; \ - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O:" name, const_cast(keywords), \ - &other)) { \ - return NULL; \ - } \ - PyObjectPtr tuple(PyTuple_Pack(2, self, other)); \ - auto* result = bind_func(NULL, tuple.get(), NULL); \ - if (PyErr_Occurred()) { throw py::error_already_set(); } \ - return result; \ - END_HANDLE_ERRORS \ +#define BINARY_METHOD(func_name, bind_func, name) \ + static PyObject* func_name(PyObject* self, PyObject* args, PyObject* kwargs) { \ + HANDLE_ERRORS \ + std::cout << "cpython" << name << std::endl; \ + PyObjectPtr concat_args(concat_self(self, args)); \ + auto* result = bind_func(NULL, concat_args.get(), kwargs); \ + if (PyErr_Occurred()) { throw py::error_already_set(); } \ + return result; \ + END_HANDLE_ERRORS \ } BINARY_METHOD(PyTensorObject_floor_divide, functional::floor_divide, "floor_divide"); @@ -255,7 +247,6 @@ BINARY_METHOD(PyTensorObject_matmul, functional::matmul, "matmul"); BINARY_METHOD(PyTensorObject_logical_and, functional::logical_and, "logical_and"); BINARY_METHOD(PyTensorObject_logical_or, functional::logical_or, "logical_or"); BINARY_METHOD(PyTensorObject_logical_xor, functional::logical_xor, "logical_xor"); -BINARY_METHOD(PyTensorObject_bmm, functional::batch_matmul, "bmm"); BINARY_METHOD(PyTensorObject_ne, functional::not_equal, "ne"); BINARY_METHOD(PyTensorObject_lt, functional::less, "lt"); BINARY_METHOD(PyTensorObject_le, functional::less_equal, "le"); @@ -266,6 +257,15 @@ static PyObject* PyTensorObject_byte(PyObject* self, PyObject* unused) { END_HANDLE_ERRORS } +static PyObject* PyTensorObject_bmm(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_ERRORS + PyObjectPtr concat_args(concat_self(self, args)); + PyObject* result = functional::batch_matmul(NULL, concat_args.get(), kwargs); + if (PyErr_Occurred()) { throw py::error_already_set(); } + return result; + END_HANDLE_ERRORS +} + static PyObject* PyTensorObject_dim(PyObject* self, PyObject* unused) { HANDLE_ERRORS return functional::CastToPyObject(PyTensor_Unpack(self)->ndim()); @@ -597,7 +597,6 @@ PyMethodDef PyTensorObject_extra_methods[] = { {"isnan", PyTensorObject_isnan, METH_NOARGS, NULL}, {"isinf", PyTensorObject_isinf, METH_NOARGS, NULL}, {"logical_not", PyTensorObject_logical_not, METH_NOARGS, NULL}, - // {"floor_divide", PyTensorObject_div, METH_O, NULL}, {"floor", PyTensorObject_floor, METH_NOARGS, NULL}, {"floor_", PyTensorObject_floor_, METH_NOARGS, NULL}, {"reshape", (PyCFunction)PyTensorObject_reshape, METH_VARARGS | METH_KEYWORDS, NULL}, diff --git a/oneflow/core/functional/functional_api.yaml b/oneflow/core/functional/functional_api.yaml index 4976974baa1..060e2b5fb69 100755 --- 
a/oneflow/core/functional/functional_api.yaml +++ b/oneflow/core/functional/functional_api.yaml @@ -223,9 +223,9 @@ - name: "floor_divide" signature: [ - "Tensor (Tensor x, Tensor y) => FloorDiv", - "Tensor (Tensor input, Scalar scalar, *, Bool inplace=False) => ScalarFloorDiv", - "Tensor (Tensor input, Scalar scalar) => ScalarFloorDiv", + "Tensor (Tensor input, Tensor other) => FloorDiv", + "Tensor (Tensor input, Scalar other, *, Bool inplace=False) => ScalarFloorDiv", + "Tensor (Tensor input, Scalar other) => ScalarFloorDiv", ] bind_python: True @@ -939,7 +939,7 @@ - name: "matmul" signature: - "Tensor (Tensor a, Tensor b, Bool transpose_a=False, Bool transpose_b=False, + "Tensor (Tensor input, Tensor other, Bool transpose_a=False, Bool transpose_b=False, Double alpha=1.0) => MatMul" bind_python: True From 52e58a95051b83f01ae47c601ad9452bed087600 Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Tue, 31 May 2022 10:43:45 +0800 Subject: [PATCH 23/50] rename BINARY_FUNC to DIRECT_PASS_FUNC, modify some functions --- .../api/python/framework/tensor_functions.cpp | 136 +++++++----------- 1 file changed, 52 insertions(+), 84 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index 5a6e29cc7a7..0f70c0a3079 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -15,7 +15,10 @@ limitations under the License. */ #include +#include +#include #include +#include #include "oneflow/api/python/exception/exception.h" #include "oneflow/api/python/framework/size.h" #include "oneflow/api/python/functional/common.h" @@ -23,6 +26,7 @@ limitations under the License. #include "oneflow/core/common/shape_vec.h" #include "oneflow/core/functional/functional.h" #include "oneflow/core/common/shape.h" +#include "oneflow/core/functional/functional_api.yaml.h" namespace oneflow { namespace one { @@ -222,34 +226,40 @@ UNARY_METHOD(PyTensorObject_tanh, functional::Tanh); UNARY_METHOD(PyTensorObject_atanh, functional::Atanh); UNARY_METHOD(PyTensorObject_logical_not, functional::LogicalNot); -#define BINARY_METHOD(func_name, bind_func, name) \ +#define DIRECT_PASS_FUNC(func_name, bind_func, name) \ static PyObject* func_name(PyObject* self, PyObject* args, PyObject* kwargs) { \ HANDLE_ERRORS \ std::cout << "cpython" << name << std::endl; \ PyObjectPtr concat_args(concat_self(self, args)); \ - auto* result = bind_func(NULL, concat_args.get(), kwargs); \ + PyObject* result = bind_func(NULL, concat_args.get(), kwargs); \ if (PyErr_Occurred()) { throw py::error_already_set(); } \ return result; \ END_HANDLE_ERRORS \ } -BINARY_METHOD(PyTensorObject_floor_divide, functional::floor_divide, "floor_divide"); -BINARY_METHOD(PyTensorObject_atan2, functional::atan2, "atan2"); -BINARY_METHOD(PyTensorObject_gt, functional::greater, "gt"); -BINARY_METHOD(PyTensorObject_ge, functional::greater_equal, "ge"); -BINARY_METHOD(PyTensorObject_div, functional::div, "div"); -BINARY_METHOD(PyTensorObject_div_, functional::div_, "div_"); -BINARY_METHOD(PyTensorObject_mul, functional::mul, "mul"); -BINARY_METHOD(PyTensorObject_mul_, functional::mul_, "mul_"); -BINARY_METHOD(PyTensorObject_sub, functional::sub, "sub"); -BINARY_METHOD(PyTensorObject_fmod, functional::fmod, "fmod"); -BINARY_METHOD(PyTensorObject_matmul, functional::matmul, "matmul"); -BINARY_METHOD(PyTensorObject_logical_and, functional::logical_and, "logical_and"); -BINARY_METHOD(PyTensorObject_logical_or, functional::logical_or, "logical_or"); 
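Renaming the YAML parameters in patch 22 is not cosmetic: once kwargs are forwarded verbatim, the keyword a Python caller writes must match the name declared in functional_api.yaml. Expected behavior under the renamed signatures (illustrative calls, assuming a build with this series applied):

    import oneflow as flow

    a = flow.randn(2, 4)
    b = flow.randn(4, 3)
    a.matmul(other=b)        # 'other' now matches the YAML name (was 'b')
    a.floor_divide(other=2)  # scalar overload; the parameter was previously 'y'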
-BINARY_METHOD(PyTensorObject_logical_xor, functional::logical_xor, "logical_xor"); -BINARY_METHOD(PyTensorObject_ne, functional::not_equal, "ne"); -BINARY_METHOD(PyTensorObject_lt, functional::less, "lt"); -BINARY_METHOD(PyTensorObject_le, functional::less_equal, "le"); +DIRECT_PASS_FUNC(PyTensorObject_floor_divide, functional::floor_divide, "floor_divide"); +DIRECT_PASS_FUNC(PyTensorObject_atan2, functional::atan2, "atan2"); +DIRECT_PASS_FUNC(PyTensorObject_gt, functional::greater, "gt"); +DIRECT_PASS_FUNC(PyTensorObject_ge, functional::greater_equal, "ge"); +DIRECT_PASS_FUNC(PyTensorObject_div, functional::div, "div"); +DIRECT_PASS_FUNC(PyTensorObject_div_, functional::div_, "div_"); +DIRECT_PASS_FUNC(PyTensorObject_mul, functional::mul, "mul"); +DIRECT_PASS_FUNC(PyTensorObject_mul_, functional::mul_, "mul_"); +DIRECT_PASS_FUNC(PyTensorObject_sub, functional::sub, "sub"); +DIRECT_PASS_FUNC(PyTensorObject_fmod, functional::fmod, "fmod"); +DIRECT_PASS_FUNC(PyTensorObject_matmul, functional::matmul, "matmul"); +DIRECT_PASS_FUNC(PyTensorObject_logical_and, functional::logical_and, "logical_and"); +DIRECT_PASS_FUNC(PyTensorObject_logical_or, functional::logical_or, "logical_or"); +DIRECT_PASS_FUNC(PyTensorObject_logical_xor, functional::logical_xor, "logical_xor"); +DIRECT_PASS_FUNC(PyTensorObject_ne, functional::not_equal, "ne"); +DIRECT_PASS_FUNC(PyTensorObject_lt, functional::less, "lt"); +DIRECT_PASS_FUNC(PyTensorObject_le, functional::less_equal, "le"); +DIRECT_PASS_FUNC(PyTensorObject_bmm, functional::batch_matmul, "bmm") +DIRECT_PASS_FUNC(PyTensorObject_argmax, functional::argmax, "argmax") +DIRECT_PASS_FUNC(PyTensorObject_argmin, functional::argmin, "argmin") +DIRECT_PASS_FUNC(PyTensorObject_amin, functional::amin, "amin") +DIRECT_PASS_FUNC(PyTensorObject_addcmul, functional::addcmul, "addcmul") +DIRECT_PASS_FUNC(PyTensorObject_addcmul_, functional::addcmul_, "addcmul_") static PyObject* PyTensorObject_byte(PyObject* self, PyObject* unused) { HANDLE_ERRORS @@ -257,15 +267,6 @@ static PyObject* PyTensorObject_byte(PyObject* self, PyObject* unused) { END_HANDLE_ERRORS } -static PyObject* PyTensorObject_bmm(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - PyObjectPtr concat_args(concat_self(self, args)); - PyObject* result = functional::batch_matmul(NULL, concat_args.get(), kwargs); - if (PyErr_Occurred()) { throw py::error_already_set(); } - return result; - END_HANDLE_ERRORS -} - static PyObject* PyTensorObject_dim(PyObject* self, PyObject* unused) { HANDLE_ERRORS return functional::CastToPyObject(PyTensor_Unpack(self)->ndim()); @@ -295,6 +296,7 @@ static PyObject* PyTensorObject_get_device(PyObject* self, PyObject* unused) { static PyObject* PyTensorObject_size(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS + std::cout << "cpython size" << std::endl; PyObject* idx = Py_None; static const char* keywords[2] = {"idx", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:size", const_cast(keywords), &idx)) { @@ -307,40 +309,19 @@ static PyObject* PyTensorObject_size(PyObject* self, PyObject* args, PyObject* k END_HANDLE_ERRORS } -static PyObject* PyTensorObject_argmax(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - PyObjectPtr concat_args(concat_self(self, args)); - PyObject* result = functional::argmax(NULL, concat_args.get(), kwargs); - if (PyErr_Occurred()) { throw py::error_already_set(); } - return result; - END_HANDLE_ERRORS -} - -static PyObject* PyTensorObject_argmin(PyObject* self, PyObject* args, PyObject* 
kwargs) { - HANDLE_ERRORS - PyObjectPtr concat_args(concat_self(self, args)); - PyObject* result = functional::argmin(NULL, concat_args.get(), kwargs); - if (PyErr_Occurred()) { throw py::error_already_set(); } - return result; - END_HANDLE_ERRORS -} - -static PyObject* PyTensorObject_amin(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - std::cout << "cpython amin" << std::endl; - PyObjectPtr concat_args(concat_self(self, args)); - PyObject* result = functional::amin(NULL, concat_args.get(), kwargs); - if (PyErr_Occurred()) { throw py::error_already_set(); } - return result; - END_HANDLE_ERRORS -} - static PyObject* PyTensorObject_cast(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS - PyObjectPtr concat_args(concat_self(self, args)); - PyObject* result = functional::cast(NULL, concat_args.get(), kwargs); - if (PyErr_Occurred()) { throw py::error_already_set(); } - return result; + PyObject* dtype = NULL; + PyObject* pin_memory = Py_False; + static const char* keywords[3] = {"dtype", "pin_memroy", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|O!:cast", const_cast(keywords), + &dtype, &PyBool_Type, &pin_memory)) { + return NULL; + } + CHECK_OR_THROW(functional::PyDTypeCheck(dtype)) << Error::TypeError() << "cast(): argument 'dtype' must be data type, but found " + << functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(dtype))); + const auto& result = functional::Cast(PyTensor_Unpack(self), functional::PyUnpackDType(dtype), pin_memory == Py_True); + return PyTensor_New(ASSERT_PTR(result)); END_HANDLE_ERRORS } @@ -372,32 +353,19 @@ static PyObject* PyTensorObject_diagonal(PyObject* self, PyObject* args, PyObjec END_HANDLE_ERRORS } -static PyObject* PyTensorObject_addcmul(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - std::cout << "cpython" << std::endl; - PyObjectPtr concat_args(concat_self(self, args)); - PyObject* result = functional::addcmul(NULL, concat_args.get(), kwargs); - if (PyErr_Occurred()) { throw py::error_already_set(); } - return result; - END_HANDLE_ERRORS -} - -static PyObject* PyTensorObject_addcmul_(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - std::cout << "cpython" << std::endl; - PyObjectPtr concat_args(concat_self(self, args)); - PyObject* result = functional::addcmul_(NULL, concat_args.get(), kwargs); - if (PyErr_Occurred()) { throw py::error_already_set(); } - return result; - END_HANDLE_ERRORS -} - static PyObject* PyTensorObject_sub_(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS - std::cout << "cpython" << std::endl; - if (kwargs != NULL) { CHECK_OR_THROW(PyDict_SetItemString(kwargs, "inplace", Py_True) > -1); } - PyObjectPtr concat_args(concat_self(self, args)); - PyObject* result = functional::sub(NULL, concat_args.get(), kwargs); + PyObject* other = NULL; + static const char* keywords[2] = {"other", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O:sub_", const_cast(keywords), &other)) { + return NULL; + } + std::cout << "cpython ?????" 
<< std::endl; + PyObjectPtr dict(PyDict_New()); + CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "inplace", Py_True) > -1); + // if (kwargs != NULL) { CHECK_OR_THROW(PyDict_Merge(dict.get(), kwargs, 0) > -1); } + PyObjectPtr concat_args(PyTuple_Pack(2, self, other)); + PyObject* result = functional::sub(NULL, concat_args.get(), dict.get()); if (PyErr_Occurred()) { throw py::error_already_set(); } return result; END_HANDLE_ERRORS From 2b8d78e5fddb5a8b586bfc0f04c71112e24ebcbb Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Tue, 31 May 2022 10:53:20 +0800 Subject: [PATCH 24/50] refine code, format code --- .../api/python/framework/tensor_functions.cpp | 22 ++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index 0f70c0a3079..31f762c6025 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -83,7 +83,7 @@ NB_BINARY_FUNC(PyTensorObject_nb_matrix_multiply, functional::matmul); static PyObject* PyTensorObject_nb_pow(PyObject* a, PyObject* b, PyObject* unsed) { HANDLE_ERRORS PyObjectPtr tuple(PyTuple_Pack(2, a, b)); - auto* result = functional::pow(NULL, tuple.get(), NULL); + PyObject* result = functional::pow(NULL, tuple.get(), NULL); if (PyErr_Occurred()) { throw py::error_already_set(); } return result; END_HANDLE_ERRORS @@ -94,7 +94,7 @@ static PyObject* PyTensorObject_nb_invert(PyObject* self) { CHECK_OR_THROW(PyTensor_Unpack(self)->dtype()->data_type() == DataType::kBool) << "~ (operator.invert) is only implemented on integer and Boolean-type tensors"; PyObjectPtr tuple(PyTuple_Pack(1, self)); - auto* result = functional::logical_not(NULL, tuple.get(), NULL); + PyObject* result = functional::logical_not(NULL, tuple.get(), NULL); if (PyErr_Occurred()) { throw py::error_already_set(); } return result; END_HANDLE_ERRORS @@ -106,7 +106,7 @@ static PyObject* PyTensorObject_nb_invert(PyObject* self) { PyObjectPtr tuple(PyTuple_Pack(2, a, b)); \ PyObjectPtr dict(PyDict_New()); \ CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "inplace", Py_True) > -1); \ - auto* result = bind_func(NULL, tuple.get(), dict.get()); \ + PyObject* result = bind_func(NULL, tuple.get(), dict.get()); \ if (PyErr_Occurred()) { throw py::error_already_set(); } \ return result; \ END_HANDLE_ERRORS \ @@ -226,12 +226,12 @@ UNARY_METHOD(PyTensorObject_tanh, functional::Tanh); UNARY_METHOD(PyTensorObject_atanh, functional::Atanh); UNARY_METHOD(PyTensorObject_logical_not, functional::LogicalNot); -#define DIRECT_PASS_FUNC(func_name, bind_func, name) \ +#define DIRECT_PASS_FUNC(func_name, bind_func, name) \ static PyObject* func_name(PyObject* self, PyObject* args, PyObject* kwargs) { \ HANDLE_ERRORS \ std::cout << "cpython" << name << std::endl; \ PyObjectPtr concat_args(concat_self(self, args)); \ - PyObject* result = bind_func(NULL, concat_args.get(), kwargs); \ + PyObject* result = bind_func(NULL, concat_args.get(), kwargs); \ if (PyErr_Occurred()) { throw py::error_already_set(); } \ return result; \ END_HANDLE_ERRORS \ @@ -314,13 +314,15 @@ static PyObject* PyTensorObject_cast(PyObject* self, PyObject* args, PyObject* k PyObject* dtype = NULL; PyObject* pin_memory = Py_False; static const char* keywords[3] = {"dtype", "pin_memroy", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|O!:cast", const_cast(keywords), - &dtype, &PyBool_Type, &pin_memory)) { + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|O!:cast", 
const_cast(keywords), &dtype, + &PyBool_Type, &pin_memory)) { return NULL; } - CHECK_OR_THROW(functional::PyDTypeCheck(dtype)) << Error::TypeError() << "cast(): argument 'dtype' must be data type, but found " + CHECK_OR_THROW(functional::PyDTypeCheck(dtype)) + << Error::TypeError() << "cast(): argument 'dtype' must be data type, but found " << functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(dtype))); - const auto& result = functional::Cast(PyTensor_Unpack(self), functional::PyUnpackDType(dtype), pin_memory == Py_True); + const auto& result = functional::Cast(PyTensor_Unpack(self), functional::PyUnpackDType(dtype), + pin_memory == Py_True); return PyTensor_New(ASSERT_PTR(result)); END_HANDLE_ERRORS } @@ -357,7 +359,7 @@ static PyObject* PyTensorObject_sub_(PyObject* self, PyObject* args, PyObject* k HANDLE_ERRORS PyObject* other = NULL; static const char* keywords[2] = {"other", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O:sub_", const_cast(keywords), &other)) { + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O:sub_", const_cast(keywords), &other)) { return NULL; } std::cout << "cpython ?????" << std::endl; From 005cd6d5e3c70066d2e505e22256d6ded464187f Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Tue, 31 May 2022 11:00:08 +0800 Subject: [PATCH 25/50] add inplace /=, add comments --- oneflow/api/python/framework/tensor_functions.cpp | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index 31f762c6025..f50e27349d5 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -119,7 +119,7 @@ NB_INPLACE_BINARY_FUNC(PyTensorObject_nb_inplace_sub, functional::sub); NB_BINARY_FUNC(PyTensorObject_nb_inplace_mul, functional::mul_); NB_BINARY_FUNC(PyTensorObject_nb_inplace_true_div, functional::div_); -PyObject* PyTensorObject_inplace_pow(PyObject* a, PyObject* b, PyObject* unsed) { +PyObject* PyTensorObject_nb_inplace_pow(PyObject* a, PyObject* b, PyObject* unsed) { HANDLE_ERRORS PyObjectPtr tuple(PyTuple_Pack(2, a, b)); PyObjectPtr dict(PyDict_New()); @@ -155,7 +155,7 @@ PyNumberMethods PyTensorObject_as_number = { PyTensorObject_nb_inplace_sub, // nb_inplace_sub PyTensorObject_nb_inplace_mul, // nb_inplace_mul NULL, // nb_inplace_remainder - NULL, // nb_inplace_pow + PyTensorObject_nb_inplace_pow, // nb_inplace_pow NULL, // nb_inplace_lshift NULL, // nb_inplace_rshift NULL, // nb_inplace_and @@ -169,11 +169,13 @@ PyNumberMethods PyTensorObject_as_number = { NULL, // nb_index PyTensorObject_nb_matrix_multiply, // nb_matrix_multiply - NULL, // not implemented yet nb_inplace_matrix_multiply + NULL, // nb_inplace_matrix_multiply }; // extra methods + +// functions that accept only one Tensor #define UNARY_METHOD(func_name, bind_func) \ static PyObject* func_name(PyObject* self, PyObject* unused) { \ HANDLE_ERRORS \ @@ -226,6 +228,7 @@ UNARY_METHOD(PyTensorObject_tanh, functional::Tanh); UNARY_METHOD(PyTensorObject_atanh, functional::Atanh); UNARY_METHOD(PyTensorObject_logical_not, functional::LogicalNot); +// functions that directly pass arguments without parsing #define DIRECT_PASS_FUNC(func_name, bind_func, name) \ static PyObject* func_name(PyObject* self, PyObject* args, PyObject* kwargs) { \ HANDLE_ERRORS \ @@ -261,6 +264,7 @@ DIRECT_PASS_FUNC(PyTensorObject_amin, functional::amin, "amin") DIRECT_PASS_FUNC(PyTensorObject_addcmul, functional::addcmul, "addcmul") 
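Two things in patches 23 through 25 deserve a note. First, cast() keeps hand-written parsing on purpose: dtype is mandatory and type-checked, and pin_memory must be a real bool, enforced by the O! converter with PyBool_Type (the "pin_memroy" keyword spelling at this point in the series is a typo that patch 26 corrects). Second, filling the nb_inplace_* slots is what routes Python's augmented assignments to these C functions. A sketch of the expected behavior, assuming the slots act like their CPython counterparts:

    import oneflow as flow

    x = flow.randn(2, 3)
    y = x.cast(flow.int32)  # dtype checked at the C layer; a non-dtype raises TypeError
    x *= 2                  # nb_inplace_mul      -> functional::mul_
    x /= 2                  # nb_inplace_true_div -> functional::div_
    x **= 2                 # nb_inplace_pow      -> functional::pow with inplace=True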
DIRECT_PASS_FUNC(PyTensorObject_addcmul_, functional::addcmul_, "addcmul_") +// functions that parsing at Python C api layer static PyObject* PyTensorObject_byte(PyObject* self, PyObject* unused) { HANDLE_ERRORS return PyTensor_New(ASSERT_PTR(functional::To(PyTensor_Unpack(self), DType::UInt8(), false))); From 67e9d378d49a945edc218ea554fe0a5346f9b81c Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Tue, 31 May 2022 13:51:08 +0800 Subject: [PATCH 26/50] remove name in macros --- .../api/python/framework/tensor_functions.cpp | 67 +++++++++++-------- 1 file changed, 40 insertions(+), 27 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index f50e27349d5..b96fc28a052 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -229,10 +229,10 @@ UNARY_METHOD(PyTensorObject_atanh, functional::Atanh); UNARY_METHOD(PyTensorObject_logical_not, functional::LogicalNot); // functions that directly pass arguments without parsing -#define DIRECT_PASS_FUNC(func_name, bind_func, name) \ +#define DIRECT_PASS_FUNC(func_name, bind_func) \ static PyObject* func_name(PyObject* self, PyObject* args, PyObject* kwargs) { \ HANDLE_ERRORS \ - std::cout << "cpython" << name << std::endl; \ + std::cout << "cpython" << std::endl; \ PyObjectPtr concat_args(concat_self(self, args)); \ PyObject* result = bind_func(NULL, concat_args.get(), kwargs); \ if (PyErr_Occurred()) { throw py::error_already_set(); } \ @@ -240,29 +240,28 @@ UNARY_METHOD(PyTensorObject_logical_not, functional::LogicalNot); END_HANDLE_ERRORS \ } -DIRECT_PASS_FUNC(PyTensorObject_floor_divide, functional::floor_divide, "floor_divide"); -DIRECT_PASS_FUNC(PyTensorObject_atan2, functional::atan2, "atan2"); -DIRECT_PASS_FUNC(PyTensorObject_gt, functional::greater, "gt"); -DIRECT_PASS_FUNC(PyTensorObject_ge, functional::greater_equal, "ge"); -DIRECT_PASS_FUNC(PyTensorObject_div, functional::div, "div"); -DIRECT_PASS_FUNC(PyTensorObject_div_, functional::div_, "div_"); -DIRECT_PASS_FUNC(PyTensorObject_mul, functional::mul, "mul"); -DIRECT_PASS_FUNC(PyTensorObject_mul_, functional::mul_, "mul_"); -DIRECT_PASS_FUNC(PyTensorObject_sub, functional::sub, "sub"); -DIRECT_PASS_FUNC(PyTensorObject_fmod, functional::fmod, "fmod"); -DIRECT_PASS_FUNC(PyTensorObject_matmul, functional::matmul, "matmul"); -DIRECT_PASS_FUNC(PyTensorObject_logical_and, functional::logical_and, "logical_and"); -DIRECT_PASS_FUNC(PyTensorObject_logical_or, functional::logical_or, "logical_or"); -DIRECT_PASS_FUNC(PyTensorObject_logical_xor, functional::logical_xor, "logical_xor"); -DIRECT_PASS_FUNC(PyTensorObject_ne, functional::not_equal, "ne"); -DIRECT_PASS_FUNC(PyTensorObject_lt, functional::less, "lt"); -DIRECT_PASS_FUNC(PyTensorObject_le, functional::less_equal, "le"); -DIRECT_PASS_FUNC(PyTensorObject_bmm, functional::batch_matmul, "bmm") -DIRECT_PASS_FUNC(PyTensorObject_argmax, functional::argmax, "argmax") -DIRECT_PASS_FUNC(PyTensorObject_argmin, functional::argmin, "argmin") -DIRECT_PASS_FUNC(PyTensorObject_amin, functional::amin, "amin") -DIRECT_PASS_FUNC(PyTensorObject_addcmul, functional::addcmul, "addcmul") -DIRECT_PASS_FUNC(PyTensorObject_addcmul_, functional::addcmul_, "addcmul_") +DIRECT_PASS_FUNC(PyTensorObject_floor_divide, functional::floor_divide) +DIRECT_PASS_FUNC(PyTensorObject_atan2, functional::atan2) +DIRECT_PASS_FUNC(PyTensorObject_gt, functional::greater) +DIRECT_PASS_FUNC(PyTensorObject_ge, functional::greater_equal) 
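With the name parameter gone, DIRECT_PASS_FUNC is a pure forwarding adapter; every expansion generates a method of the same shape. In Python terms the macro does little more than this (an analogy, not generated code):

    def direct_pass(bind_func):
        def method(self, *args, **kwargs):
            # prepend self, forward everything else untouched
            return bind_func(self, *args, **kwargs)
        return method

    # e.g. PyTensorObject_gt is, morally, direct_pass(functional.greater)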
+DIRECT_PASS_FUNC(PyTensorObject_div, functional::div) +DIRECT_PASS_FUNC(PyTensorObject_div_, functional::div_) +DIRECT_PASS_FUNC(PyTensorObject_mul, functional::mul) +DIRECT_PASS_FUNC(PyTensorObject_mul_, functional::mul_) +DIRECT_PASS_FUNC(PyTensorObject_sub, functional::sub) +DIRECT_PASS_FUNC(PyTensorObject_fmod, functional::fmod) +DIRECT_PASS_FUNC(PyTensorObject_logical_and, functional::logical_and) +DIRECT_PASS_FUNC(PyTensorObject_logical_or, functional::logical_or) +DIRECT_PASS_FUNC(PyTensorObject_logical_xor, functional::logical_xor) +DIRECT_PASS_FUNC(PyTensorObject_ne, functional::not_equal) +DIRECT_PASS_FUNC(PyTensorObject_lt, functional::less) +DIRECT_PASS_FUNC(PyTensorObject_le, functional::less_equal) +DIRECT_PASS_FUNC(PyTensorObject_bmm, functional::batch_matmul) +DIRECT_PASS_FUNC(PyTensorObject_argmax, functional::argmax) +DIRECT_PASS_FUNC(PyTensorObject_argmin, functional::argmin) +DIRECT_PASS_FUNC(PyTensorObject_amin, functional::amin) +DIRECT_PASS_FUNC(PyTensorObject_addcmul, functional::addcmul) +DIRECT_PASS_FUNC(PyTensorObject_addcmul_, functional::addcmul_) // functions that parsing at Python C api layer static PyObject* PyTensorObject_byte(PyObject* self, PyObject* unused) { @@ -317,7 +316,7 @@ static PyObject* PyTensorObject_cast(PyObject* self, PyObject* args, PyObject* k HANDLE_ERRORS PyObject* dtype = NULL; PyObject* pin_memory = Py_False; - static const char* keywords[3] = {"dtype", "pin_memroy", NULL}; + static const char* keywords[3] = {"dtype", "pin_memory", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|O!:cast", const_cast(keywords), &dtype, &PyBool_Type, &pin_memory)) { return NULL; @@ -359,6 +358,21 @@ static PyObject* PyTensorObject_diagonal(PyObject* self, PyObject* args, PyObjec END_HANDLE_ERRORS } +static PyObject* PyTensorObject_matmul(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_ERRORS + PyObject* other = NULL; + static const char* keywords[2] = {"other", NULL}; + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O:matmul", const_cast(keywords), + &other)) { + return NULL; + } + PyObjectPtr concat_args(PyTuple_Pack(2, self, other)); + PyObject* result = functional::matmul(NULL, concat_args.get(), NULL); + if (PyErr_Occurred()) { throw py::error_already_set(); } + return result; + END_HANDLE_ERRORS +} + static PyObject* PyTensorObject_sub_(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS PyObject* other = NULL; @@ -505,7 +519,6 @@ PyMethodDef PyTensorObject_extra_methods[] = { {"gt", (PyCFunction)PyTensorObject_gt, METH_VARARGS | METH_KEYWORDS, NULL}, {"ge", (PyCFunction)PyTensorObject_ge, METH_VARARGS | METH_KEYWORDS, NULL}, {"div", (PyCFunction)PyTensorObject_div, METH_VARARGS | METH_KEYWORDS, NULL}, - // {"floor_divide", (PyCFunction)PyTensorObject_div, METH_VARARGS | METH_KEYWORDS, NULL}, {"div_", (PyCFunction)PyTensorObject_div_, METH_VARARGS | METH_KEYWORDS, NULL}, {"mul", (PyCFunction)PyTensorObject_mul, METH_VARARGS | METH_KEYWORDS, NULL}, {"mul_", (PyCFunction)PyTensorObject_mul_, METH_VARARGS | METH_KEYWORDS, NULL}, From f1390603eba385c83632a66d0737c26f5b3e0ceb Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Tue, 31 May 2022 13:51:49 +0800 Subject: [PATCH 27/50] remove python api --- python/oneflow/framework/tensor.py | 36 +----------------------------- 1 file changed, 1 insertion(+), 35 deletions(-) diff --git a/python/oneflow/framework/tensor.py b/python/oneflow/framework/tensor.py index 9b67f1d137c..a2d15501605 100755 --- a/python/oneflow/framework/tensor.py +++ b/python/oneflow/framework/tensor.py @@ 
-1139,10 +1139,8 @@ def _cumprod(self, dim, dtype=None): def RegisterMethods(): - Tensor.__iadd__ = lambda self, other: self.add_(other) Tensor.ndim = property(_ndim) Tensor.numpy = _numpy - # Tensor.size = _size Tensor.backward = _backward Tensor.__setitem__ = _setitem Tensor.__str__ = _str @@ -1166,28 +1164,10 @@ def RegisterMethods(): Tensor.fill_ = _fill Tensor.copy_ = _copy Tensor._meta_repr = _meta_repr - # Tensor.floor_divide = _floor_divide - # Tensor.argmax = _argmax - # Tensor.argmin = _argmin Tensor.argsort = _argsort Tensor.argwhere = _argwhere - # Tensor.amin = _amin - # Tensor.atan2 = _atan2 - # Tensor.gt = _gt - # Tensor.ge = _ge - # Tensor.cast = _cast - # Tensor.diag = _diag - # Tensor.diagonal = _diagonal Tensor.add = _add Tensor.add_ = _add_inplace - # Tensor.addcmul = _addcmul - # Tensor.addcmul_ = _addcmul_ - # Tensor.div = _truediv - # Tensor.div_ = _truediv_inplace - # Tensor.mul = _mul - # Tensor.mul_ = _mul_ - # Tensor.sub = _sub - # Tensor.sub_ = _sub_inplace Tensor.clamp = _clamp Tensor.clamp_ = _clamp_ Tensor.clip = _clip @@ -1196,7 +1176,6 @@ def RegisterMethods(): Tensor.cuda = _cuda Tensor.expand = _expand Tensor.expand_as = _expand_as - # Tensor.fmod = _fmod Tensor.flatten = _flatten Tensor.flip = _flip Tensor.in_top_k = _in_top_k @@ -1209,12 +1188,11 @@ def RegisterMethods(): Tensor.pow = _pow Tensor.var = _var Tensor.std = _std - # Tensor.matmul = _matmul Tensor.softplus = _softplus Tensor.tril = _tril Tensor.triu = _triu Tensor.where = _where - # Tensor.norm = _norm + Tensor.norm = _norm Tensor.local_to_global = _local_to_global Tensor.global_to_global = _global_to_global Tensor.to_global = _to_global @@ -1222,12 +1200,7 @@ def RegisterMethods(): Tensor.relu_ = _relu_inplace Tensor.softmax = _softmax Tensor.log_softmax = _log_softmax - # Tensor.logical_and = _and - # Tensor.logical_or = _or - # Tensor.logical_not = _not - # Tensor.logical_xor = _xor Tensor.roll = _roll - # Tensor.bmm = _bmm Tensor.chunk = _chunk Tensor.repeat = _repeat Tensor.tile = _tile @@ -1249,18 +1222,11 @@ def RegisterMethods(): Tensor.masked_fill = _masked_fill Tensor.masked_select = _masked_select Tensor.eq = _eq - # Tensor.ne = _ne Tensor.item = _item - # Tensor.lt = _lt - # Tensor.le = _le Tensor.to_local = _to_local Tensor.sort = _sort Tensor.type_as = _type_as Tensor.tolist = _tolist - # Tensor.int = _int - # Tensor.long = _long - # Tensor.float = _float - # Tensor.double = _double Tensor.is_floating_point = _is_floating_point Tensor.topk = _topk Tensor.nms = _nms From ac74978f92b1d956a5865ccf7f2a68e80e0375c5 Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Tue, 31 May 2022 13:53:59 +0800 Subject: [PATCH 28/50] remove redundant include --- oneflow/api/python/framework/tensor_functions.cpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index b96fc28a052..4d103c4a150 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -15,10 +15,6 @@ limitations under the License. */ #include -#include -#include -#include -#include #include "oneflow/api/python/exception/exception.h" #include "oneflow/api/python/framework/size.h" #include "oneflow/api/python/functional/common.h" @@ -26,7 +22,6 @@ limitations under the License. 
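The tensor.py cleanup in patch 27 removes registrations that are now dead weight: methods such as pow and the comparison operators come from the C extension's method table, and augmented assignment resolves through the number protocol, so the Python-level __iadd__ lambda is no longer needed. Observable behavior should be unchanged, assuming nb_inplace_add is wired the same way as the sub and mul slots shown earlier (illustrative):

    import oneflow as flow

    x = flow.ones(2)
    x += flow.ones(2)  # handled by the C number protocol, no Python __iadd__
    x.pow(2)           # C method from PyTensorObject_extra_methods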
#include "oneflow/core/common/shape_vec.h" #include "oneflow/core/functional/functional.h" #include "oneflow/core/common/shape.h" -#include "oneflow/core/functional/functional_api.yaml.h" namespace oneflow { namespace one { From 6174148de4bb89fd084599320cb571a03f3d9b0a Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Tue, 31 May 2022 14:01:29 +0800 Subject: [PATCH 29/50] remove cout --- oneflow/api/python/framework/tensor_functions.cpp | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index 4d103c4a150..3e5197dbe54 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -174,7 +174,6 @@ PyNumberMethods PyTensorObject_as_number = { #define UNARY_METHOD(func_name, bind_func) \ static PyObject* func_name(PyObject* self, PyObject* unused) { \ HANDLE_ERRORS \ - std::cout << "cpython" << std::endl; \ return PyTensor_New(ASSERT_PTR(bind_func(PyTensor_Unpack(self)))); \ END_HANDLE_ERRORS \ } @@ -227,7 +226,6 @@ UNARY_METHOD(PyTensorObject_logical_not, functional::LogicalNot); #define DIRECT_PASS_FUNC(func_name, bind_func) \ static PyObject* func_name(PyObject* self, PyObject* args, PyObject* kwargs) { \ HANDLE_ERRORS \ - std::cout << "cpython" << std::endl; \ PyObjectPtr concat_args(concat_self(self, args)); \ PyObject* result = bind_func(NULL, concat_args.get(), kwargs); \ if (PyErr_Occurred()) { throw py::error_already_set(); } \ @@ -294,7 +292,6 @@ static PyObject* PyTensorObject_get_device(PyObject* self, PyObject* unused) { static PyObject* PyTensorObject_size(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS - std::cout << "cpython size" << std::endl; PyObject* idx = Py_None; static const char* keywords[2] = {"idx", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:size", const_cast(keywords), &idx)) { @@ -327,7 +324,6 @@ static PyObject* PyTensorObject_cast(PyObject* self, PyObject* args, PyObject* k static PyObject* PyTensorObject_diag(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS - std::cout << "cpython" << std::endl; int32_t diagonal = 0; static const char* keywords[2] = {"diagonal", NULL}; if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|i:diag", const_cast(keywords), @@ -340,7 +336,6 @@ static PyObject* PyTensorObject_diag(PyObject* self, PyObject* args, PyObject* k static PyObject* PyTensorObject_diagonal(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS - std::cout << "cpython" << std::endl; int32_t offset = 0; int32_t dim1 = 0; int32_t dim2 = 1; @@ -375,7 +370,6 @@ static PyObject* PyTensorObject_sub_(PyObject* self, PyObject* args, PyObject* k if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O:sub_", const_cast(keywords), &other)) { return NULL; } - std::cout << "cpython ?????" 
<< std::endl;
   PyObjectPtr dict(PyDict_New());
   CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "inplace", Py_True) > -1);
   // if (kwargs != NULL) { CHECK_OR_THROW(PyDict_Merge(dict.get(), kwargs, 0) > -1); }
@@ -503,12 +497,13 @@ PyMethodDef PyTensorObject_extra_methods[] = {
     {"addcmul", (PyCFunction)PyTensorObject_addcmul, METH_VARARGS | METH_KEYWORDS, NULL},
     {"addcmul_", (PyCFunction)PyTensorObject_addcmul, METH_VARARGS | METH_KEYWORDS, NULL},
     {"sub_", (PyCFunction)PyTensorObject_sub_, METH_VARARGS | METH_KEYWORDS, NULL},
+    {"matmul", (PyCFunction)PyTensorObject_matmul, METH_VARARGS | METH_KEYWORDS, NULL},
     {"int", PyTensorObject_int, METH_NOARGS, NULL},
     {"long", PyTensorObject_long, METH_NOARGS, NULL},
     {"float", PyTensorObject_float, METH_NOARGS, NULL},
     {"double", PyTensorObject_double, METH_NOARGS, NULL},
-    // macro BINARY_METHOD
+    // macro DIRECT_PASS_FUNC
     {"floor_divide", (PyCFunction)PyTensorObject_floor_divide, METH_VARARGS | METH_KEYWORDS, NULL},
     {"atan2", (PyCFunction)PyTensorObject_atan2, METH_VARARGS | METH_KEYWORDS, NULL},
     {"gt", (PyCFunction)PyTensorObject_gt, METH_VARARGS | METH_KEYWORDS, NULL},
@@ -519,7 +514,6 @@ PyMethodDef PyTensorObject_extra_methods[] = {
     {"mul_", (PyCFunction)PyTensorObject_mul_, METH_VARARGS | METH_KEYWORDS, NULL},
     {"sub", (PyCFunction)PyTensorObject_sub, METH_VARARGS | METH_KEYWORDS, NULL},
     {"fmod", (PyCFunction)PyTensorObject_fmod, METH_VARARGS | METH_KEYWORDS, NULL},
-    {"matmul", (PyCFunction)PyTensorObject_matmul, METH_VARARGS | METH_KEYWORDS, NULL},
     {"logical_and", (PyCFunction)PyTensorObject_logical_and, METH_VARARGS | METH_KEYWORDS, NULL},
     {"logical_or", (PyCFunction)PyTensorObject_logical_or, METH_VARARGS | METH_KEYWORDS, NULL},
     {"logical_xor", (PyCFunction)PyTensorObject_logical_xor, METH_VARARGS | METH_KEYWORDS, NULL},

From dce09af4e94e3c22124e5c4ddae2128d3106b3b7 Mon Sep 17 00:00:00 2001
From: Wang Yi
Date: Tue, 31 May 2022 14:04:24 +0800
Subject: [PATCH 30/50] format code

---
 oneflow/api/python/framework/tensor_functions.cpp | 1 -
 1 file changed, 1 deletion(-)

diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp
index 3e5197dbe54..33e27a0e25d 100644
--- a/oneflow/api/python/framework/tensor_functions.cpp
+++ b/oneflow/api/python/framework/tensor_functions.cpp
@@ -372,7 +372,6 @@ static PyObject* PyTensorObject_sub_(PyObject* self, PyObject* args, PyObject* k
   }
   PyObjectPtr dict(PyDict_New());
   CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "inplace", Py_True) > -1);
-  // if (kwargs != NULL) { CHECK_OR_THROW(PyDict_Merge(dict.get(), kwargs, 0) > -1); }
   PyObjectPtr concat_args(PyTuple_Pack(2, self, other));
   PyObject* result = functional::sub(NULL, concat_args.get(), dict.get());
   if (PyErr_Occurred()) { throw py::error_already_set(); }
   return result;
   END_HANDLE_ERRORS

From 67f7cb56db9e7bc4505a52746c7116fc180fcbaa Mon Sep 17 00:00:00 2001
From: Wang Yi
Date: Tue, 31 May 2022 16:19:52 +0800
Subject: [PATCH 31/50] refactor tensor.size by directly calling shape->At, refactor tensor.sub_
 by calling nb_inplace_sub

---
 .../api/python/framework/tensor_functions.cpp | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)

diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp
index 33e27a0e25d..c789c6a5f4e 100644
--- a/oneflow/api/python/framework/tensor_functions.cpp
+++ b/oneflow/api/python/framework/tensor_functions.cpp
@@ -289,18 +289,20 @@ static PyObject* PyTensorObject_get_device(PyObject* self, PyObject* unused) {
   return 
functional::CastToPyObject(ASSERT(PyTensor_Unpack(self)->device())->device_id()); END_HANDLE_ERRORS } - +// -> static PyObject* PyTensorObject_size(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS - PyObject* idx = Py_None; + PyObject* idx_obj = Py_None; static const char* keywords[2] = {"idx", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:size", const_cast(keywords), &idx)) { + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:size", const_cast(keywords), &idx_obj)) { return NULL; } auto shape = PyTensor_Unpack(self)->shape(); - PyObject* shape_object = TensorSize_NewFromShape(*shape); - if (idx == NULL || idx == Py_None) return shape_object; - return shape_object->ob_type->tp_as_mapping->mp_subscript(shape_object, idx); + if (idx_obj == NULL || idx_obj == Py_None) return TensorSize_NewFromShape(*shape); + int32_t idx = PyLong_AsLong(idx_obj); + CHECK_OR_THROW(idx >= -shape->NumAxes() && idx < shape->NumAxes()) << Error::IndexError() << "Dimension out of range (expected to be in range of [" << -shape->NumAxes() << ", " << shape->NumAxes() - 1 << "], but got "<< idx << ")"; + idx = idx < 0 ? idx + shape->NumAxes() : idx; + return PyLong_FromLong(shape->At(idx)); END_HANDLE_ERRORS } @@ -370,10 +372,8 @@ static PyObject* PyTensorObject_sub_(PyObject* self, PyObject* args, PyObject* k if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O:sub_", const_cast(keywords), &other)) { return NULL; } - PyObjectPtr dict(PyDict_New()); - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "inplace", Py_True) > -1); PyObjectPtr concat_args(PyTuple_Pack(2, self, other)); - PyObject* result = functional::sub(NULL, concat_args.get(), dict.get()); + PyObject* result = PyTensorObject_nb_inplace_sub(self, other); if (PyErr_Occurred()) { throw py::error_already_set(); } return result; END_HANDLE_ERRORS From 50a1a84267a418be99b2327464e2a03f0d22d371 Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Tue, 31 May 2022 16:26:03 +0800 Subject: [PATCH 32/50] remove redundant code --- oneflow/api/python/framework/tensor_functions.cpp | 1 - 1 file changed, 1 deletion(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index c789c6a5f4e..7c976189271 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -372,7 +372,6 @@ static PyObject* PyTensorObject_sub_(PyObject* self, PyObject* args, PyObject* k if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O:sub_", const_cast(keywords), &other)) { return NULL; } - PyObjectPtr concat_args(PyTuple_Pack(2, self, other)); PyObject* result = PyTensorObject_nb_inplace_sub(self, other); if (PyErr_Occurred()) { throw py::error_already_set(); } return result; From 5815c469ff7e6cf51de906ae1c1249deefbd9e33 Mon Sep 17 00:00:00 2001 From: oneflow-ci-bot Date: Tue, 31 May 2022 08:35:51 +0000 Subject: [PATCH 33/50] auto format by CI --- oneflow/api/python/framework/tensor_functions.cpp | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index 7c976189271..fe8963f4067 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -294,13 +294,16 @@ static PyObject* PyTensorObject_size(PyObject* self, PyObject* args, PyObject* k HANDLE_ERRORS PyObject* idx_obj = Py_None; static const char* keywords[2] = {"idx", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, 
"|O:size", const_cast(keywords), &idx_obj)) { + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:size", const_cast(keywords), + &idx_obj)) { return NULL; } auto shape = PyTensor_Unpack(self)->shape(); if (idx_obj == NULL || idx_obj == Py_None) return TensorSize_NewFromShape(*shape); int32_t idx = PyLong_AsLong(idx_obj); - CHECK_OR_THROW(idx >= -shape->NumAxes() && idx < shape->NumAxes()) << Error::IndexError() << "Dimension out of range (expected to be in range of [" << -shape->NumAxes() << ", " << shape->NumAxes() - 1 << "], but got "<< idx << ")"; + CHECK_OR_THROW(idx >= -shape->NumAxes() && idx < shape->NumAxes()) + << Error::IndexError() << "Dimension out of range (expected to be in range of [" + << -shape->NumAxes() << ", " << shape->NumAxes() - 1 << "], but got " << idx << ")"; idx = idx < 0 ? idx + shape->NumAxes() : idx; return PyLong_FromLong(shape->At(idx)); END_HANDLE_ERRORS From 6ec91f5aa3d636e5792064fb1a18534f317161ca Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Tue, 31 May 2022 17:12:38 +0800 Subject: [PATCH 34/50] fix typo, fix wrong call --- oneflow/api/python/framework/tensor_functions.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index fe8963f4067..c3949cc34e3 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -289,7 +289,7 @@ static PyObject* PyTensorObject_get_device(PyObject* self, PyObject* unused) { return functional::CastToPyObject(ASSERT(PyTensor_Unpack(self)->device())->device_id()); END_HANDLE_ERRORS } -// -> + static PyObject* PyTensorObject_size(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS PyObject* idx_obj = Py_None; @@ -496,7 +496,7 @@ PyMethodDef PyTensorObject_extra_methods[] = { {"diag", (PyCFunction)PyTensorObject_diag, METH_VARARGS | METH_KEYWORDS, NULL}, {"diagonal", (PyCFunction)PyTensorObject_diagonal, METH_VARARGS | METH_KEYWORDS, NULL}, {"addcmul", (PyCFunction)PyTensorObject_addcmul, METH_VARARGS | METH_KEYWORDS, NULL}, - {"addcmul_", (PyCFunction)PyTensorObject_addcmul, METH_VARARGS | METH_KEYWORDS, NULL}, + {"addcmul_", (PyCFunction)PyTensorObject_addcmul_, METH_VARARGS | METH_KEYWORDS, NULL}, {"sub_", (PyCFunction)PyTensorObject_sub_, METH_VARARGS | METH_KEYWORDS, NULL}, {"matmul", (PyCFunction)PyTensorObject_matmul, METH_VARARGS | METH_KEYWORDS, NULL}, {"int", PyTensorObject_int, METH_NOARGS, NULL}, From fa0051d4bbe532e81d815ef90be8abdfc0fc56d4 Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Wed, 1 Jun 2022 09:44:44 +0800 Subject: [PATCH 35/50] modify idx datatype from int32 to int64 in tensor.size --- oneflow/api/python/framework/tensor_functions.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index c3949cc34e3..3f8f8e7d378 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -300,12 +300,12 @@ static PyObject* PyTensorObject_size(PyObject* self, PyObject* args, PyObject* k } auto shape = PyTensor_Unpack(self)->shape(); if (idx_obj == NULL || idx_obj == Py_None) return TensorSize_NewFromShape(*shape); - int32_t idx = PyLong_AsLong(idx_obj); + int64_t idx = PyLong_AsLongLong(idx_obj); CHECK_OR_THROW(idx >= -shape->NumAxes() && idx < shape->NumAxes()) << Error::IndexError() << "Dimension out of range (expected to be in range of [" << 
-shape->NumAxes() << ", " << shape->NumAxes() - 1 << "], but got " << idx << ")"; idx = idx < 0 ? idx + shape->NumAxes() : idx; - return PyLong_FromLong(shape->At(idx)); + return PyLong_FromLongLong(shape->At(idx)); END_HANDLE_ERRORS } From 9e9920fd7236498d2fbebe3ed7c739adcd69230e Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Wed, 1 Jun 2022 09:49:52 +0800 Subject: [PATCH 36/50] add some DIRECT_PASS_FUNC --- .../api/python/framework/tensor_functions.cpp | 77 ++++++++++++++++++- python/oneflow/framework/tensor.py | 54 ++++++------- 2 files changed, 100 insertions(+), 31 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index 7c976189271..48d6ec73f1a 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -15,6 +15,7 @@ limitations under the License. */ #include +#include #include "oneflow/api/python/exception/exception.h" #include "oneflow/api/python/framework/size.h" #include "oneflow/api/python/functional/common.h" @@ -241,6 +242,7 @@ DIRECT_PASS_FUNC(PyTensorObject_div, functional::div) DIRECT_PASS_FUNC(PyTensorObject_div_, functional::div_) DIRECT_PASS_FUNC(PyTensorObject_mul, functional::mul) DIRECT_PASS_FUNC(PyTensorObject_mul_, functional::mul_) +DIRECT_PASS_FUNC(PyTensorObject_add, functional::add) DIRECT_PASS_FUNC(PyTensorObject_sub, functional::sub) DIRECT_PASS_FUNC(PyTensorObject_fmod, functional::fmod) DIRECT_PASS_FUNC(PyTensorObject_logical_and, functional::logical_and) @@ -253,8 +255,32 @@ DIRECT_PASS_FUNC(PyTensorObject_bmm, functional::batch_matmul) DIRECT_PASS_FUNC(PyTensorObject_argmax, functional::argmax) DIRECT_PASS_FUNC(PyTensorObject_argmin, functional::argmin) DIRECT_PASS_FUNC(PyTensorObject_amin, functional::amin) +DIRECT_PASS_FUNC(PyTensorObject_amax, functional::amax) DIRECT_PASS_FUNC(PyTensorObject_addcmul, functional::addcmul) DIRECT_PASS_FUNC(PyTensorObject_addcmul_, functional::addcmul_) +DIRECT_PASS_FUNC(PyTensorObject_clip, functional::clip) +DIRECT_PASS_FUNC(PyTensorObject_clip_, functional::clip_) +DIRECT_PASS_FUNC(PyTensorObject_clamp, functional::clamp) +DIRECT_PASS_FUNC(PyTensorObject_clamp_, functional::clamp_) +DIRECT_PASS_FUNC(PyTensorObject_flatten, functional::flatten) +DIRECT_PASS_FUNC(PyTensorObject_in_top_k, functional::in_top_k) +DIRECT_PASS_FUNC(PyTensorObject_index_select, functional::index_select) +DIRECT_PASS_FUNC(PyTensorObject_maximum, functional::maximum) +DIRECT_PASS_FUNC(PyTensorObject_minimum, functional::minimum) +DIRECT_PASS_FUNC(PyTensorObject_tril, functional::tril) +DIRECT_PASS_FUNC(PyTensorObject_triu, functional::triu) +DIRECT_PASS_FUNC(PyTensorObject_softmax, functional::softmax) +DIRECT_PASS_FUNC(PyTensorObject_log_softmax, functional::log_softmax) +DIRECT_PASS_FUNC(PyTensorObject_roll, functional::roll) +DIRECT_PASS_FUNC(PyTensorObject_unbind, functional::unbind) +DIRECT_PASS_FUNC(PyTensorObject_squeeze, functional::squeeze) +DIRECT_PASS_FUNC(PyTensorObject_swapaxes, functional::swapaxes) +DIRECT_PASS_FUNC(PyTensorObject_swapdims, functional::swapdims) +DIRECT_PASS_FUNC(PyTensorObject_unfold, functional::unfold_tensor) +DIRECT_PASS_FUNC(PyTensorObject_unsqueeze, functional::unsqueeze) +DIRECT_PASS_FUNC(PyTensorObject_max, functional::max) +DIRECT_PASS_FUNC(PyTensorObject_min, functional::min) +DIRECT_PASS_FUNC(PyTensorObject_median, functional::median) // functions that parsing at Python C api layer static PyObject* PyTensorObject_byte(PyObject* self, PyObject* unused) { @@ -289,18 
+315,21 @@ static PyObject* PyTensorObject_get_device(PyObject* self, PyObject* unused) { return functional::CastToPyObject(ASSERT(PyTensor_Unpack(self)->device())->device_id()); END_HANDLE_ERRORS } -// -> + static PyObject* PyTensorObject_size(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS PyObject* idx_obj = Py_None; static const char* keywords[2] = {"idx", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:size", const_cast(keywords), &idx_obj)) { + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:size", const_cast(keywords), + &idx_obj)) { return NULL; } auto shape = PyTensor_Unpack(self)->shape(); if (idx_obj == NULL || idx_obj == Py_None) return TensorSize_NewFromShape(*shape); int32_t idx = PyLong_AsLong(idx_obj); - CHECK_OR_THROW(idx >= -shape->NumAxes() && idx < shape->NumAxes()) << Error::IndexError() << "Dimension out of range (expected to be in range of [" << -shape->NumAxes() << ", " << shape->NumAxes() - 1 << "], but got "<< idx << ")"; + CHECK_OR_THROW(idx >= -shape->NumAxes() && idx < shape->NumAxes()) + << Error::IndexError() << "Dimension out of range (expected to be in range of [" + << -shape->NumAxes() << ", " << shape->NumAxes() - 1 << "], but got " << idx << ")"; idx = idx < 0 ? idx + shape->NumAxes() : idx; return PyLong_FromLong(shape->At(idx)); END_HANDLE_ERRORS @@ -365,6 +394,18 @@ static PyObject* PyTensorObject_matmul(PyObject* self, PyObject* args, PyObject* END_HANDLE_ERRORS } +static PyObject* PyTensorObject_add_(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_ERRORS + PyObjectPtr concat_args(concat_self(self, args)); + PyObjectPtr dict(PyDict_New()); + CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "inplace", Py_True) > -1); + if (kwargs != NULL) { CHECK_OR_THROW(PyDict_Merge(dict.get(), kwargs, 0) > -1); } + PyObject* result = functional::add(NULL, concat_args.get(), dict.get()); + if (PyErr_Occurred()) { throw py::error_already_set(); } + return result; + END_HANDLE_ERRORS +} + static PyObject* PyTensorObject_sub_(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS PyObject* other = NULL; @@ -416,6 +457,7 @@ static PyObject* PyTensorObject_reshape_as(PyObject* self, PyObject* args, PyObj DATATYPE_FUNC(PyTensorObject_int, DType::Int32()); DATATYPE_FUNC(PyTensorObject_long, DType::Int64()); +DATATYPE_FUNC(PyTensorObject_half, DType::Float16()); DATATYPE_FUNC(PyTensorObject_float, DType::Float()); DATATYPE_FUNC(PyTensorObject_double, DType::Double()); @@ -493,11 +535,13 @@ PyMethodDef PyTensorObject_extra_methods[] = { {"diag", (PyCFunction)PyTensorObject_diag, METH_VARARGS | METH_KEYWORDS, NULL}, {"diagonal", (PyCFunction)PyTensorObject_diagonal, METH_VARARGS | METH_KEYWORDS, NULL}, {"addcmul", (PyCFunction)PyTensorObject_addcmul, METH_VARARGS | METH_KEYWORDS, NULL}, - {"addcmul_", (PyCFunction)PyTensorObject_addcmul, METH_VARARGS | METH_KEYWORDS, NULL}, + {"addcmul_", (PyCFunction)PyTensorObject_addcmul_, METH_VARARGS | METH_KEYWORDS, NULL}, + {"add_", (PyCFunction)PyTensorObject_add_, METH_VARARGS | METH_KEYWORDS, NULL}, {"sub_", (PyCFunction)PyTensorObject_sub_, METH_VARARGS | METH_KEYWORDS, NULL}, {"matmul", (PyCFunction)PyTensorObject_matmul, METH_VARARGS | METH_KEYWORDS, NULL}, {"int", PyTensorObject_int, METH_NOARGS, NULL}, {"long", PyTensorObject_long, METH_NOARGS, NULL}, + {"half", PyTensorObject_half, METH_NOARGS, NULL}, {"float", PyTensorObject_float, METH_NOARGS, NULL}, {"double", PyTensorObject_double, METH_NOARGS, NULL}, @@ -510,6 +554,7 @@ PyMethodDef PyTensorObject_extra_methods[] = { 
{"div_", (PyCFunction)PyTensorObject_div_, METH_VARARGS | METH_KEYWORDS, NULL}, {"mul", (PyCFunction)PyTensorObject_mul, METH_VARARGS | METH_KEYWORDS, NULL}, {"mul_", (PyCFunction)PyTensorObject_mul_, METH_VARARGS | METH_KEYWORDS, NULL}, + {"add", (PyCFunction)PyTensorObject_add, METH_VARARGS | METH_KEYWORDS, NULL}, {"sub", (PyCFunction)PyTensorObject_sub, METH_VARARGS | METH_KEYWORDS, NULL}, {"fmod", (PyCFunction)PyTensorObject_fmod, METH_VARARGS | METH_KEYWORDS, NULL}, {"logical_and", (PyCFunction)PyTensorObject_logical_and, METH_VARARGS | METH_KEYWORDS, NULL}, @@ -519,6 +564,30 @@ PyMethodDef PyTensorObject_extra_methods[] = { {"ne", (PyCFunction)PyTensorObject_ne, METH_VARARGS | METH_KEYWORDS, NULL}, {"lt", (PyCFunction)PyTensorObject_lt, METH_VARARGS | METH_KEYWORDS, NULL}, {"le", (PyCFunction)PyTensorObject_le, METH_VARARGS | METH_KEYWORDS, NULL}, + {"clip", (PyCFunction)PyTensorObject_clip, METH_VARARGS | METH_KEYWORDS, NULL}, + {"clip_", (PyCFunction)PyTensorObject_clip_, METH_VARARGS | METH_KEYWORDS, NULL}, + {"clamp", (PyCFunction)PyTensorObject_clamp, METH_VARARGS | METH_KEYWORDS, NULL}, + {"clamp_", (PyCFunction)PyTensorObject_clamp_, METH_VARARGS | METH_KEYWORDS, NULL}, + {"flatten", (PyCFunction)PyTensorObject_flatten, METH_VARARGS | METH_KEYWORDS, NULL}, + {"in_top_k", (PyCFunction)PyTensorObject_in_top_k, METH_VARARGS | METH_KEYWORDS, NULL}, + {"index_select", (PyCFunction)PyTensorObject_index_select, METH_VARARGS | METH_KEYWORDS, NULL}, + {"maximum", (PyCFunction)PyTensorObject_maximum, METH_VARARGS | METH_KEYWORDS, NULL}, + {"minimum", (PyCFunction)PyTensorObject_minimum, METH_VARARGS | METH_KEYWORDS, NULL}, + {"tril", (PyCFunction)PyTensorObject_tril, METH_VARARGS | METH_KEYWORDS, NULL}, + {"triu", (PyCFunction)PyTensorObject_triu, METH_VARARGS | METH_KEYWORDS, NULL}, + {"softmax", (PyCFunction)PyTensorObject_softmax, METH_VARARGS | METH_KEYWORDS, NULL}, + {"log_softmax", (PyCFunction)PyTensorObject_log_softmax, METH_VARARGS | METH_KEYWORDS, NULL}, + {"roll", (PyCFunction)PyTensorObject_roll, METH_VARARGS | METH_KEYWORDS, NULL}, + {"unbind", (PyCFunction)PyTensorObject_unbind, METH_VARARGS | METH_KEYWORDS, NULL}, + {"squeeze", (PyCFunction)PyTensorObject_squeeze, METH_VARARGS | METH_KEYWORDS, NULL}, + {"swapaxes", (PyCFunction)PyTensorObject_swapaxes, METH_VARARGS | METH_KEYWORDS, NULL}, + {"amax", (PyCFunction)PyTensorObject_amax, METH_VARARGS | METH_KEYWORDS, NULL}, + {"swapdims", (PyCFunction)PyTensorObject_swapdims, METH_VARARGS | METH_KEYWORDS, NULL}, + {"unfold", (PyCFunction)PyTensorObject_unfold, METH_VARARGS | METH_KEYWORDS, NULL}, + {"unsqueeze", (PyCFunction)PyTensorObject_unsqueeze, METH_VARARGS | METH_KEYWORDS, NULL}, + {"max", (PyCFunction)PyTensorObject_max, METH_VARARGS | METH_KEYWORDS, NULL}, + {"min", (PyCFunction)PyTensorObject_min, METH_VARARGS | METH_KEYWORDS, NULL}, + {"median", (PyCFunction)PyTensorObject_median, METH_VARARGS | METH_KEYWORDS, NULL}, // macro UNARY_METHOD {"abs", PyTensorObject_abs, METH_NOARGS, NULL}, diff --git a/python/oneflow/framework/tensor.py b/python/oneflow/framework/tensor.py index a2d15501605..88a982b570a 100755 --- a/python/oneflow/framework/tensor.py +++ b/python/oneflow/framework/tensor.py @@ -1166,22 +1166,22 @@ def RegisterMethods(): Tensor._meta_repr = _meta_repr Tensor.argsort = _argsort Tensor.argwhere = _argwhere - Tensor.add = _add - Tensor.add_ = _add_inplace - Tensor.clamp = _clamp - Tensor.clamp_ = _clamp_ - Tensor.clip = _clip - Tensor.clip_ = _clip_ + # Tensor.add = _add + # Tensor.add_ = _add_inplace + # 
Tensor.clamp = _clamp + # Tensor.clamp_ = _clamp_ + # Tensor.clip = _clip + # Tensor.clip_ = _clip_ Tensor.cpu = _cpu Tensor.cuda = _cuda Tensor.expand = _expand Tensor.expand_as = _expand_as - Tensor.flatten = _flatten + # Tensor.flatten = _flatten Tensor.flip = _flip - Tensor.in_top_k = _in_top_k - Tensor.index_select = _index_select - Tensor.minimum = _minimum - Tensor.maximum = _maximum + # Tensor.in_top_k = _in_top_k + # Tensor.index_select = _index_select + # Tensor.minimum = _minimum + # Tensor.maximum = _maximum Tensor.new_empty = _new_empty Tensor.new_ones = _new_ones Tensor.new_zeros = _new_zeros @@ -1189,8 +1189,8 @@ def RegisterMethods(): Tensor.var = _var Tensor.std = _std Tensor.softplus = _softplus - Tensor.tril = _tril - Tensor.triu = _triu + # Tensor.tril = _tril + # Tensor.triu = _triu Tensor.where = _where Tensor.norm = _norm Tensor.local_to_global = _local_to_global @@ -1198,23 +1198,23 @@ def RegisterMethods(): Tensor.to_global = _to_global Tensor.relu = _relu Tensor.relu_ = _relu_inplace - Tensor.softmax = _softmax - Tensor.log_softmax = _log_softmax - Tensor.roll = _roll + # Tensor.softmax = _softmax + # Tensor.log_softmax = _log_softmax + # Tensor.roll = _roll Tensor.chunk = _chunk Tensor.repeat = _repeat Tensor.tile = _tile Tensor.split = _split - Tensor.unbind = _unbind - Tensor.squeeze = _squeeze - Tensor.swapaxes = _swapaxes - Tensor.amax = _amax - Tensor.swapdims = _swapdims - Tensor.unfold = _unfold + # Tensor.unbind = _unbind + # Tensor.squeeze = _squeeze + # Tensor.swapaxes = _swapaxes + # Tensor.amax = _amax + # Tensor.swapdims = _swapdims + # Tensor.unfold = _unfold Tensor.narrow = _narrow - Tensor.unsqueeze = _unsqueeze + # Tensor.unsqueeze = _unsqueeze Tensor.to = _to - Tensor.half = _half + # Tensor.half = _half Tensor.gather = _gather Tensor.all = _all Tensor.any = _any @@ -1231,9 +1231,9 @@ def RegisterMethods(): Tensor.topk = _topk Tensor.nms = _nms Tensor.nonzero = _nonzero - Tensor.max = _max - Tensor.min = _min - Tensor.median = _median + # Tensor.max = _max + # Tensor.min = _min + # Tensor.median = _median Tensor.sum = _sum Tensor.mean = _mean Tensor.prod = _prod From 28600e476bcd16a6c14c8f969b734d108fff9cdb Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Wed, 1 Jun 2022 16:09:06 +0800 Subject: [PATCH 37/50] add cpu cuda var pow and etc. 
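
These methods previously lived in python/oneflow/framework/tensor.py and are
rewritten here against the C API. A minimal sketch of the intended Python-side
behavior (illustrative only, not a test shipped with this patch; the tensor
values and the CUDA device are assumptions):

    import oneflow as flow

    x = flow.randn(2, 3)
    y = x.cuda()        # same as x.to("cuda"); an integer device index is also accepted
    v = x.var(dim=1)    # defaults to unbiased=True, keepdim=False
    s = x.softplus()    # forwards to functional::softplus
    z = x.pow(2).chunk(2, dim=1)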
---
 .../api/python/framework/tensor_functions.cpp | 95 +++++++++++++++++++
 python/oneflow/framework/tensor.py            | 18 ++--
 2 files changed, 104 insertions(+), 9 deletions(-)

diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp
index 73929b7e2db..22e2908397a 100644
--- a/oneflow/api/python/framework/tensor_functions.cpp
+++ b/oneflow/api/python/framework/tensor_functions.cpp
@@ -281,6 +281,9 @@ DIRECT_PASS_FUNC(PyTensorObject_unsqueeze, functional::unsqueeze)
 DIRECT_PASS_FUNC(PyTensorObject_max, functional::max)
 DIRECT_PASS_FUNC(PyTensorObject_min, functional::min)
 DIRECT_PASS_FUNC(PyTensorObject_median, functional::median)
+DIRECT_PASS_FUNC(PyTensorObject_pow, functional::pow)
+DIRECT_PASS_FUNC(PyTensorObject_chunk, functional::chunk)
+DIRECT_PASS_FUNC(PyTensorObject_narrow, functional::narrow)
 
 // functions that parsing at Python C api layer
 static PyObject* PyTensorObject_byte(PyObject* self, PyObject* unused) {
@@ -447,6 +450,88 @@ static PyObject* PyTensorObject_reshape_as(PyObject* self, PyObject* args, PyObj
   END_HANDLE_ERRORS
 }
 
+static PyObject* PyTensorObject_cpu(PyObject* self, PyObject* unused) {
+  HANDLE_ERRORS
+  Optional<std::string> device = "cpu";
+  return PyTensor_New(ASSERT_PTR(functional::To(PyTensor_Unpack(self), device, NullOpt, false)));
+  END_HANDLE_ERRORS
+}
+
+static PyObject* PyTensorObject_cuda(PyObject* self, PyObject* args, PyObject* kwargs) {
+  HANDLE_ERRORS
+  PyObject* device_obj = Py_None;
+  static const char* keywords[2] = {"device", NULL};
+  if(!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:cuda", const_cast<char**>(keywords), &device_obj)) {
+    return NULL;
+  }
+  PyObjectPtr dict(PyDict_New());
+  if(device_obj == Py_None) {
+    device_obj = PyUnicode_FromString("cuda");
+  }
+  else if(PyLong_Check(device_obj)) {
+    std::string device_str = "cuda:" + std::to_string(PyLong_AsLongLong(device_obj));
+    device_obj = PyUnicode_FromString(device_str.c_str());
+  }
+  CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "device", device_obj) > -1);
+  PyObjectPtr tuple(PyTuple_Pack(1, self));
+  PyObject* result = functional::to(NULL, tuple.get(), dict.get());
+  if (PyErr_Occurred()) { throw py::error_already_set(); }
+  return result;
+  END_HANDLE_ERRORS
+}
+
+static PyObject* PyTensorObject_var(PyObject* self, PyObject* args, PyObject* kwargs){
+  HANDLE_ERRORS
+  PyObjectPtr dict(PyDict_New());
+  CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "unbiased", Py_True) > -1);
+  CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "keepdim", Py_False) > -1);
+  if(kwargs != NULL) {
+    CHECK_OR_THROW(PyDict_Merge(dict.get(), kwargs, 1) > -1);
+  }
+  PyObjectPtr concat_args(concat_self(self, args));
+  PyObject* result = functional::var(NULL, concat_args.get(), dict.get());
+  if (PyErr_Occurred()) { throw py::error_already_set(); }
+  return result;
+  END_HANDLE_ERRORS
+}
+
+static PyObject* PyTensorObject_std(PyObject* self, PyObject* args, PyObject* kwargs){
+  HANDLE_ERRORS
+  PyObjectPtr dict(PyDict_New());
+  CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "unbiased", Py_True) > -1);
+  CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "keepdim", Py_False) > -1);
+  if(kwargs != NULL) {
+    CHECK_OR_THROW(PyDict_Merge(dict.get(), kwargs, 1) > -1);
+  }
+  PyObjectPtr concat_args(concat_self(self, args));
+  PyObject* result = functional::std(NULL, concat_args.get(), dict.get());
+  if (PyErr_Occurred()) { throw py::error_already_set(); }
+  return result;
+  END_HANDLE_ERRORS
+}
+
+static PyObject* PyTensorObject_softplus(PyObject* self, PyObject* unused) {
+
HANDLE_ERRORS + PyObjectPtr concat_args(PyTuple_Pack(1, self)); + PyObject* result = functional::softplus(NULL, concat_args.get(), NULL); + if (PyErr_Occurred()) { throw py::error_already_set(); } + return result; + END_HANDLE_ERRORS +} + +static PyObject* PyTensorObject_relu(PyObject* self, PyObject* unused) { + HANDLE_ERRORS + return PyTensor_New(ASSERT_PTR(functional::Relu(PyTensor_Unpack(self), false))); + END_HANDLE_ERRORS +} + +static PyObject* PyTensorObject_relu_(PyObject* self, PyObject* unused) { + HANDLE_ERRORS + return PyTensor_New(ASSERT_PTR(functional::Relu(PyTensor_Unpack(self), true))); + END_HANDLE_ERRORS +} + + #define DATATYPE_FUNC(func_name, dtype) \ static PyObject* func_name(PyObject* self, PyObject* unused) { \ HANDLE_ERRORS \ @@ -544,6 +629,13 @@ PyMethodDef PyTensorObject_extra_methods[] = { {"half", PyTensorObject_half, METH_NOARGS, NULL}, {"float", PyTensorObject_float, METH_NOARGS, NULL}, {"double", PyTensorObject_double, METH_NOARGS, NULL}, + {"cpu", PyTensorObject_cpu, METH_NOARGS, NULL}, + {"cuda", (PyCFunction)PyTensorObject_cuda, METH_VARARGS | METH_KEYWORDS, NULL}, + {"var", (PyCFunction)PyTensorObject_var, METH_VARARGS | METH_KEYWORDS, NULL}, + {"std", (PyCFunction)PyTensorObject_std, METH_VARARGS | METH_KEYWORDS, NULL}, + {"softplus", PyTensorObject_softplus, METH_NOARGS, NULL}, + {"relu", PyTensorObject_relu, METH_NOARGS, NULL}, + {"relu_", PyTensorObject_relu_, METH_NOARGS, NULL}, // macro DIRECT_PASS_FUNC {"floor_divide", (PyCFunction)PyTensorObject_floor_divide, METH_VARARGS | METH_KEYWORDS, NULL}, @@ -588,6 +680,9 @@ PyMethodDef PyTensorObject_extra_methods[] = { {"max", (PyCFunction)PyTensorObject_max, METH_VARARGS | METH_KEYWORDS, NULL}, {"min", (PyCFunction)PyTensorObject_min, METH_VARARGS | METH_KEYWORDS, NULL}, {"median", (PyCFunction)PyTensorObject_median, METH_VARARGS | METH_KEYWORDS, NULL}, + {"pow", (PyCFunction)PyTensorObject_pow, METH_VARARGS | METH_KEYWORDS, NULL}, + {"chunk", (PyCFunction)PyTensorObject_chunk, METH_VARARGS | METH_KEYWORDS, NULL}, + {"narrow", (PyCFunction)PyTensorObject_narrow, METH_VARARGS | METH_KEYWORDS, NULL}, // macro UNARY_METHOD {"abs", PyTensorObject_abs, METH_NOARGS, NULL}, diff --git a/python/oneflow/framework/tensor.py b/python/oneflow/framework/tensor.py index 88a982b570a..532c64d70bf 100755 --- a/python/oneflow/framework/tensor.py +++ b/python/oneflow/framework/tensor.py @@ -1172,8 +1172,8 @@ def RegisterMethods(): # Tensor.clamp_ = _clamp_ # Tensor.clip = _clip # Tensor.clip_ = _clip_ - Tensor.cpu = _cpu - Tensor.cuda = _cuda + # Tensor.cpu = _cpu + # Tensor.cuda = _cuda Tensor.expand = _expand Tensor.expand_as = _expand_as # Tensor.flatten = _flatten @@ -1185,10 +1185,10 @@ def RegisterMethods(): Tensor.new_empty = _new_empty Tensor.new_ones = _new_ones Tensor.new_zeros = _new_zeros - Tensor.pow = _pow - Tensor.var = _var - Tensor.std = _std - Tensor.softplus = _softplus + # Tensor.pow = _pow + # Tensor.var = _var + # Tensor.std = _std + # Tensor.softplus = _softplus # Tensor.tril = _tril # Tensor.triu = _triu Tensor.where = _where @@ -1196,12 +1196,12 @@ def RegisterMethods(): Tensor.local_to_global = _local_to_global Tensor.global_to_global = _global_to_global Tensor.to_global = _to_global - Tensor.relu = _relu - Tensor.relu_ = _relu_inplace + # Tensor.relu = _relu + # Tensor.relu_ = _relu_inplace # Tensor.softmax = _softmax # Tensor.log_softmax = _log_softmax # Tensor.roll = _roll - Tensor.chunk = _chunk + # Tensor.chunk = _chunk Tensor.repeat = _repeat Tensor.tile = _tile Tensor.split = _split From 
1723d74055fe79f14cc3e79d123ec22ad19c9378 Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Wed, 1 Jun 2022 16:37:44 +0800 Subject: [PATCH 38/50] add masked_fill any all --- .../api/python/framework/tensor_functions.cpp | 26 +++++++++++++++++++ python/oneflow/framework/tensor.py | 6 ++--- 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index 22e2908397a..6a70a71c0dd 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -284,6 +284,7 @@ DIRECT_PASS_FUNC(PyTensorObject_median, functional::median) DIRECT_PASS_FUNC(PyTensorObject_pow, functional::pow) DIRECT_PASS_FUNC(PyTensorObject_chunk, functional::chunk) DIRECT_PASS_FUNC(PyTensorObject_narrow, functional::narrow) +DIRECT_PASS_FUNC(PyTensorObject_masked_fill, functional::masked_fill) // functions that parsing at Python C api layer static PyObject* PyTensorObject_byte(PyObject* self, PyObject* unused) { @@ -531,6 +532,28 @@ static PyObject* PyTensorObject_relu_(PyObject* self, PyObject* unused) { END_HANDLE_ERRORS } +static PyObject* PyTensorObject_all(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_ERRORS + if(args == NULL && kwargs == NULL) + return PyTensor_New(ASSERT_PTR(functional::ReduceAllWhole(PyTensor_Unpack(self)))); + PyObjectPtr concat_args(concat_self(self, args)); + PyObject* result = functional::reduce_all(NULL, concat_args.get(), kwargs); + if (PyErr_Occurred()) { throw py::error_already_set(); } + return result; + END_HANDLE_ERRORS +} + +static PyObject* PyTensorObject_any(PyObject* self, PyObject* args, PyObject* kwargs) { + HANDLE_ERRORS + if(args == NULL && kwargs == NULL) + return PyTensor_New(ASSERT_PTR(functional::ReduceAnyWhole(PyTensor_Unpack(self)))); + PyObjectPtr concat_args(concat_self(self, args)); + PyObject* result = functional::reduce_any(NULL, concat_args.get(), kwargs); + if (PyErr_Occurred()) { throw py::error_already_set(); } + return result; + END_HANDLE_ERRORS +} + #define DATATYPE_FUNC(func_name, dtype) \ static PyObject* func_name(PyObject* self, PyObject* unused) { \ @@ -636,6 +659,8 @@ PyMethodDef PyTensorObject_extra_methods[] = { {"softplus", PyTensorObject_softplus, METH_NOARGS, NULL}, {"relu", PyTensorObject_relu, METH_NOARGS, NULL}, {"relu_", PyTensorObject_relu_, METH_NOARGS, NULL}, + {"all", (PyCFunction)PyTensorObject_all, METH_VARARGS | METH_KEYWORDS, NULL}, + {"any", (PyCFunction)PyTensorObject_any, METH_VARARGS | METH_KEYWORDS, NULL}, // macro DIRECT_PASS_FUNC {"floor_divide", (PyCFunction)PyTensorObject_floor_divide, METH_VARARGS | METH_KEYWORDS, NULL}, @@ -683,6 +708,7 @@ PyMethodDef PyTensorObject_extra_methods[] = { {"pow", (PyCFunction)PyTensorObject_pow, METH_VARARGS | METH_KEYWORDS, NULL}, {"chunk", (PyCFunction)PyTensorObject_chunk, METH_VARARGS | METH_KEYWORDS, NULL}, {"narrow", (PyCFunction)PyTensorObject_narrow, METH_VARARGS | METH_KEYWORDS, NULL}, + {"masked_fill", (PyCFunction)PyTensorObject_masked_fill, METH_VARARGS | METH_KEYWORDS, NULL}, // macro UNARY_METHOD {"abs", PyTensorObject_abs, METH_NOARGS, NULL}, diff --git a/python/oneflow/framework/tensor.py b/python/oneflow/framework/tensor.py index 532c64d70bf..da85b74ab6c 100755 --- a/python/oneflow/framework/tensor.py +++ b/python/oneflow/framework/tensor.py @@ -1216,10 +1216,10 @@ def RegisterMethods(): Tensor.to = _to # Tensor.half = _half Tensor.gather = _gather - Tensor.all = _all - Tensor.any = _any + # Tensor.all = _all + # 
Tensor.any = _any Tensor.T = property(_T) - Tensor.masked_fill = _masked_fill + # Tensor.masked_fill = _masked_fill Tensor.masked_select = _masked_select Tensor.eq = _eq Tensor.item = _item From 9221c059b44ea7e47c4c9439bad9fa8cb6642639 Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Wed, 1 Jun 2022 20:16:57 +0800 Subject: [PATCH 39/50] make REDUCE_FUNC macro, add reduce_* functions --- .../api/python/framework/tensor_functions.cpp | 74 +++++++++---------- python/oneflow/framework/tensor.py | 45 +---------- 2 files changed, 38 insertions(+), 81 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index 3c22f106399..0c9907030e9 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -22,6 +22,7 @@ limitations under the License. #include "oneflow/core/common/shape_vec.h" #include "oneflow/core/functional/functional.h" #include "oneflow/core/common/shape.h" +#include "oneflow/core/functional/functional_api.yaml.h" namespace oneflow { namespace one { @@ -463,18 +464,18 @@ static PyObject* PyTensorObject_cuda(PyObject* self, PyObject* args, PyObject* k HANDLE_ERRORS PyObject* device_obj = Py_None; static const char* keywords[2] = {"device", NULL}; - if(!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:cuda", const_cast(keywords), &device_obj)) { + if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O:cuda", const_cast(keywords), + &device_obj)) { return NULL; } PyObjectPtr dict(PyDict_New()); - if(device_obj == Py_None) { + if (device_obj == Py_None) { device_obj = PyUnicode_FromString("cuda"); - } - else if(PyLong_Check(device_obj)) { + } else if (PyLong_Check(device_obj)) { std::string device_str = "cuda:" + std::to_string(PyLong_AsLongLong(device_obj)); device_obj = PyUnicode_FromString(device_str.c_str()); - } - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "device", device_obj) > -1); + } + CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "device", device_obj) > -1); PyObjectPtr tuple(PyTuple_Pack(1, self)); PyObject* result = functional::to(NULL, tuple.get(), dict.get()); if (PyErr_Occurred()) { throw py::error_already_set(); } @@ -482,14 +483,12 @@ static PyObject* PyTensorObject_cuda(PyObject* self, PyObject* args, PyObject* k END_HANDLE_ERRORS } -static PyObject* PyTensorObject_var(PyObject* self, PyObject* args, PyObject* kwargs){ +static PyObject* PyTensorObject_var(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS PyObjectPtr dict(PyDict_New()); - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "unbiased", Py_True) > -1); - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "keepdim", Py_False) > -1); - if(kwargs != NULL) { - CHECK_OR_THROW(PyDict_Merge(dict.get(), kwargs, 1) > -1); - } + CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "unbiased", Py_True) > -1); + CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "keepdim", Py_False) > -1); + if (kwargs != NULL) { CHECK_OR_THROW(PyDict_Merge(dict.get(), kwargs, 1) > -1); } PyObjectPtr concat_args(concat_self(self, args)); PyObject* result = functional::var(NULL, concat_args.get(), dict.get()); if (PyErr_Occurred()) { throw py::error_already_set(); } @@ -497,14 +496,12 @@ static PyObject* PyTensorObject_var(PyObject* self, PyObject* args, PyObject* kw END_HANDLE_ERRORS } -static PyObject* PyTensorObject_std(PyObject* self, PyObject* args, PyObject* kwargs){ +static PyObject* PyTensorObject_std(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS PyObjectPtr dict(PyDict_New()); - 
CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "unbiased", Py_True) > -1); - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "keepdim", Py_False) > -1); - if(kwargs != NULL) { - CHECK_OR_THROW(PyDict_Merge(dict.get(), kwargs, 1) > -1); - } + CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "unbiased", Py_True) > -1); + CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "keepdim", Py_False) > -1); + if (kwargs != NULL) { CHECK_OR_THROW(PyDict_Merge(dict.get(), kwargs, 1) > -1); } PyObjectPtr concat_args(concat_self(self, args)); PyObject* result = functional::std(NULL, concat_args.get(), dict.get()); if (PyErr_Occurred()) { throw py::error_already_set(); } @@ -533,28 +530,24 @@ static PyObject* PyTensorObject_relu_(PyObject* self, PyObject* unused) { END_HANDLE_ERRORS } -static PyObject* PyTensorObject_all(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - if(args == NULL && kwargs == NULL) - return PyTensor_New(ASSERT_PTR(functional::ReduceAllWhole(PyTensor_Unpack(self)))); - PyObjectPtr concat_args(concat_self(self, args)); - PyObject* result = functional::reduce_all(NULL, concat_args.get(), kwargs); - if (PyErr_Occurred()) { throw py::error_already_set(); } - return result; - END_HANDLE_ERRORS -} - -static PyObject* PyTensorObject_any(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - if(args == NULL && kwargs == NULL) - return PyTensor_New(ASSERT_PTR(functional::ReduceAnyWhole(PyTensor_Unpack(self)))); - PyObjectPtr concat_args(concat_self(self, args)); - PyObject* result = functional::reduce_any(NULL, concat_args.get(), kwargs); - if (PyErr_Occurred()) { throw py::error_already_set(); } - return result; - END_HANDLE_ERRORS -} +#define REDUCE_FUNC(func_name, bind_func, whole_func) \ + static PyObject* func_name(PyObject* self, PyObject* args, PyObject* kwargs) { \ + HANDLE_ERRORS \ + if (args == NULL && kwargs == NULL) { \ + return PyTensor_New(ASSERT_PTR(whole_func(PyTensor_Unpack(self)))); \ + } \ + PyObjectPtr concat_args(concat_self(self, args)); \ + PyObject* result = bind_func(NULL, concat_args.get(), kwargs); \ + if (PyErr_Occurred()) { throw py::error_already_set(); } \ + return result; \ + END_HANDLE_ERRORS \ + } +REDUCE_FUNC(PyTensorObject_any, functional::reduce_any, functional::ReduceAnyWhole) +REDUCE_FUNC(PyTensorObject_all, functional::reduce_all, functional::ReduceAllWhole) +REDUCE_FUNC(PyTensorObject_sum, functional::reduce_sum, functional::ReduceSumWhole) +REDUCE_FUNC(PyTensorObject_prod, functional::reduce_prod, functional::ReduceProdWhole) +REDUCE_FUNC(PyTensorObject_mean, functional::reduce_mean, functional::ReduceAnyWhole) #define DATATYPE_FUNC(func_name, dtype) \ static PyObject* func_name(PyObject* self, PyObject* unused) { \ @@ -662,6 +655,9 @@ PyMethodDef PyTensorObject_extra_methods[] = { {"relu_", PyTensorObject_relu_, METH_NOARGS, NULL}, {"all", (PyCFunction)PyTensorObject_all, METH_VARARGS | METH_KEYWORDS, NULL}, {"any", (PyCFunction)PyTensorObject_any, METH_VARARGS | METH_KEYWORDS, NULL}, + {"sum", (PyCFunction)PyTensorObject_sum, METH_VARARGS | METH_KEYWORDS, NULL}, + {"mean", (PyCFunction)PyTensorObject_mean, METH_VARARGS | METH_KEYWORDS, NULL}, + {"prod", (PyCFunction)PyTensorObject_prod, METH_VARARGS | METH_KEYWORDS, NULL}, // macro DIRECT_PASS_FUNC {"floor_divide", (PyCFunction)PyTensorObject_floor_divide, METH_VARARGS | METH_KEYWORDS, NULL}, diff --git a/python/oneflow/framework/tensor.py b/python/oneflow/framework/tensor.py index c46e68022c1..0ee1c083d69 100755 --- a/python/oneflow/framework/tensor.py +++ 
b/python/oneflow/framework/tensor.py @@ -1160,60 +1160,24 @@ def RegisterMethods(): Tensor._meta_repr = _meta_repr Tensor.argsort = _argsort Tensor.argwhere = _argwhere - # Tensor.add = _add - # Tensor.add_ = _add_inplace - # Tensor.clamp = _clamp - # Tensor.clamp_ = _clamp_ - # Tensor.clip = _clip - # Tensor.clip_ = _clip_ - # Tensor.cpu = _cpu - # Tensor.cuda = _cuda Tensor.expand = _expand Tensor.expand_as = _expand_as - # Tensor.flatten = _flatten Tensor.flip = _flip - # Tensor.in_top_k = _in_top_k - # Tensor.index_select = _index_select - # Tensor.minimum = _minimum - # Tensor.maximum = _maximum Tensor.new_empty = _new_empty Tensor.new_ones = _new_ones Tensor.new_zeros = _new_zeros - # Tensor.pow = _pow - # Tensor.var = _var - # Tensor.std = _std - # Tensor.softplus = _softplus - # Tensor.tril = _tril - # Tensor.triu = _triu Tensor.where = _where Tensor.norm = _norm Tensor.local_to_global = _local_to_global Tensor.global_to_global = _global_to_global Tensor.to_global = _to_global - # Tensor.relu = _relu - # Tensor.relu_ = _relu_inplace - # Tensor.softmax = _softmax - # Tensor.log_softmax = _log_softmax - # Tensor.roll = _roll - # Tensor.chunk = _chunk Tensor.repeat = _repeat Tensor.tile = _tile Tensor.split = _split - # Tensor.unbind = _unbind - # Tensor.squeeze = _squeeze - # Tensor.swapaxes = _swapaxes - # Tensor.amax = _amax - # Tensor.swapdims = _swapdims - # Tensor.unfold = _unfold Tensor.narrow = _narrow - # Tensor.unsqueeze = _unsqueeze Tensor.to = _to - # Tensor.half = _half Tensor.gather = _gather - # Tensor.all = _all - # Tensor.any = _any Tensor.T = property(_T) - # Tensor.masked_fill = _masked_fill Tensor.masked_select = _masked_select Tensor.eq = _eq Tensor.item = _item @@ -1225,12 +1189,9 @@ def RegisterMethods(): Tensor.topk = _topk Tensor.nms = _nms Tensor.nonzero = _nonzero - # Tensor.max = _max - # Tensor.min = _min - # Tensor.median = _median - Tensor.sum = _sum - Tensor.mean = _mean - Tensor.prod = _prod + # Tensor.sum = _sum + # Tensor.mean = _mean + # Tensor.prod = _prod Tensor.is_consistent = _is_consistent Tensor.to_consistent = _to_consistent Tensor.new_tensor = _new_tensor From a39b38ab097a385e6b23b4b6d1cff506cab4a955 Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Thu, 2 Jun 2022 10:41:16 +0800 Subject: [PATCH 40/50] add 0dim check in ReduceSumWhole, refine yaml --- oneflow/core/functional/functional_api.yaml | 6 ++++-- oneflow/core/functional/impl/math_functor.cpp | 1 + 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/oneflow/core/functional/functional_api.yaml b/oneflow/core/functional/functional_api.yaml index 9c6f067073a..57b01335736 100755 --- a/oneflow/core/functional/functional_api.yaml +++ b/oneflow/core/functional/functional_api.yaml @@ -292,8 +292,10 @@ bind_python: True - name: "reduce_mean" - signature: ["Tensor (Tensor x, Int32List[1] dim, Bool keepdim=False) => ReduceMean", - "Tensor (Tensor x) => ReduceMeanWhole"] + signature: [ + "Tensor (Tensor x, Int32List[1] dim, Bool keepdim=False) => ReduceMean", + "Tensor (Tensor x) => ReduceMeanWhole" + ] bind_python: True - name: "reduce_all" diff --git a/oneflow/core/functional/impl/math_functor.cpp b/oneflow/core/functional/impl/math_functor.cpp index 8065a04aaa4..8b3cba89a54 100644 --- a/oneflow/core/functional/impl/math_functor.cpp +++ b/oneflow/core/functional/impl/math_functor.cpp @@ -458,6 +458,7 @@ class ReduceSumWholeFunctor { Maybe operator()(const std::shared_ptr& x) const { MutableAttrMap attrs; const int32_t naxis = x->ndim(); + if (naxis == 0) { return x; } // for 0-dim Tensor 
std::vector<int32_t> axis(naxis);
     std::iota(axis.begin(), axis.end(), 0);
     JUST(attrs.SetAttr<std::vector<int32_t>>("axis", axis));

From 60c4b5ebb2c1677255794e727daa1daf4f3b8ebe Mon Sep 17 00:00:00 2001
From: Wang Yi
Date: Thu, 2 Jun 2022 10:46:26 +0800
Subject: [PATCH 41/50] fix bug

---
 oneflow/api/python/framework/tensor_functions.cpp | 7 +++----
 python/oneflow/framework/tensor.py                | 2 +-
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp
index 0c9907030e9..f83fb92e8e4 100644
--- a/oneflow/api/python/framework/tensor_functions.cpp
+++ b/oneflow/api/python/framework/tensor_functions.cpp
@@ -533,7 +533,8 @@ static PyObject* PyTensorObject_relu_(PyObject* self, PyObject* unused) {
 #define REDUCE_FUNC(func_name, bind_func, whole_func)                            \
   static PyObject* func_name(PyObject* self, PyObject* args, PyObject* kwargs) { \
     HANDLE_ERRORS                                                                \
-    if (args == NULL && kwargs == NULL) {                                        \
+    if ((args == NULL || PyTuple_Size(args) == 0)                                \
+        && (kwargs == NULL || PyDict_Size(kwargs) == 0)) {                       \
       return PyTensor_New(ASSERT_PTR(whole_func(PyTensor_Unpack(self))));        \
     }                                                                            \
     PyObjectPtr concat_args(concat_self(self, args));                            \
@@ -546,8 +547,7 @@ static PyObject* PyTensorObject_relu_(PyObject* self, PyObject* unused) {
 REDUCE_FUNC(PyTensorObject_any, functional::reduce_any, functional::ReduceAnyWhole)
 REDUCE_FUNC(PyTensorObject_all, functional::reduce_all, functional::ReduceAllWhole)
 REDUCE_FUNC(PyTensorObject_sum, functional::reduce_sum, functional::ReduceSumWhole)
-REDUCE_FUNC(PyTensorObject_prod, functional::reduce_prod, functional::ReduceProdWhole)
-REDUCE_FUNC(PyTensorObject_mean, functional::reduce_mean, functional::ReduceAnyWhole)
+REDUCE_FUNC(PyTensorObject_mean, functional::reduce_mean, functional::ReduceMeanWhole)
 
 #define DATATYPE_FUNC(func_name, dtype)                         \
   static PyObject* func_name(PyObject* self, PyObject* unused) { \
@@ -657,7 +657,6 @@ PyMethodDef PyTensorObject_extra_methods[] = {
     {"any", (PyCFunction)PyTensorObject_any, METH_VARARGS | METH_KEYWORDS, NULL},
     {"sum", (PyCFunction)PyTensorObject_sum, METH_VARARGS | METH_KEYWORDS, NULL},
     {"mean", (PyCFunction)PyTensorObject_mean, METH_VARARGS | METH_KEYWORDS, NULL},
-    {"prod", (PyCFunction)PyTensorObject_prod, METH_VARARGS | METH_KEYWORDS, NULL},
 
     // macro DIRECT_PASS_FUNC
     {"floor_divide", (PyCFunction)PyTensorObject_floor_divide, METH_VARARGS | METH_KEYWORDS, NULL},
diff --git a/python/oneflow/framework/tensor.py b/python/oneflow/framework/tensor.py
index 0ee1c083d69..17b903191f8 100755
--- a/python/oneflow/framework/tensor.py
+++ b/python/oneflow/framework/tensor.py
@@ -1191,7 +1191,7 @@ def RegisterMethods():
     Tensor.nonzero = _nonzero
     # Tensor.sum = _sum
     # Tensor.mean = _mean
-    # Tensor.prod = _prod
+    Tensor.prod = _prod
     Tensor.is_consistent = _is_consistent
     Tensor.to_consistent = _to_consistent
     Tensor.new_tensor = _new_tensor

From 03675885df0f0bd66c3ae21d806371d6aec243d4 Mon Sep 17 00:00:00 2001
From: Wang Yi
Date: Thu, 2 Jun 2022 15:57:55 +0800
Subject: [PATCH 42/50] restore add add_ sub sub_

---
 .../api/python/framework/tensor_functions.cpp | 38 +++----------
 python/oneflow/framework/tensor.py            |  4 ++
 2 files changed, 10 insertions(+), 32 deletions(-)

diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp
index f83fb92e8e4..1e8fa507186 100644
--- a/oneflow/api/python/framework/tensor_functions.cpp
+++ b/oneflow/api/python/framework/tensor_functions.cpp
@@ -22,7 +22,6 @@ limitations under the License.
#include "oneflow/core/common/shape_vec.h" #include "oneflow/core/functional/functional.h" #include "oneflow/core/common/shape.h" -#include "oneflow/core/functional/functional_api.yaml.h" namespace oneflow { namespace one { @@ -242,8 +241,8 @@ DIRECT_PASS_FUNC(PyTensorObject_div, functional::div) DIRECT_PASS_FUNC(PyTensorObject_div_, functional::div_) DIRECT_PASS_FUNC(PyTensorObject_mul, functional::mul) DIRECT_PASS_FUNC(PyTensorObject_mul_, functional::mul_) -DIRECT_PASS_FUNC(PyTensorObject_add, functional::add) -DIRECT_PASS_FUNC(PyTensorObject_sub, functional::sub) +// DIRECT_PASS_FUNC(PyTensorObject_add, functional::add) +// DIRECT_PASS_FUNC(PyTensorObject_sub, functional::sub) DIRECT_PASS_FUNC(PyTensorObject_fmod, functional::fmod) DIRECT_PASS_FUNC(PyTensorObject_logical_and, functional::logical_and) DIRECT_PASS_FUNC(PyTensorObject_logical_or, functional::logical_or) @@ -400,31 +399,6 @@ static PyObject* PyTensorObject_matmul(PyObject* self, PyObject* args, PyObject* END_HANDLE_ERRORS } -static PyObject* PyTensorObject_add_(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - PyObjectPtr concat_args(concat_self(self, args)); - PyObjectPtr dict(PyDict_New()); - CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "inplace", Py_True) > -1); - if (kwargs != NULL) { CHECK_OR_THROW(PyDict_Merge(dict.get(), kwargs, 0) > -1); } - PyObject* result = functional::add(NULL, concat_args.get(), dict.get()); - if (PyErr_Occurred()) { throw py::error_already_set(); } - return result; - END_HANDLE_ERRORS -} - -static PyObject* PyTensorObject_sub_(PyObject* self, PyObject* args, PyObject* kwargs) { - HANDLE_ERRORS - PyObject* other = NULL; - static const char* keywords[2] = {"other", NULL}; - if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O:sub_", const_cast(keywords), &other)) { - return NULL; - } - PyObject* result = PyTensorObject_nb_inplace_sub(self, other); - if (PyErr_Occurred()) { throw py::error_already_set(); } - return result; - END_HANDLE_ERRORS -} - static PyObject* PyTensorObject_reshape(PyObject* self, PyObject* args, PyObject* kwargs) { HANDLE_ERRORS PyObject* shape = args; @@ -638,8 +612,8 @@ PyMethodDef PyTensorObject_extra_methods[] = { {"diagonal", (PyCFunction)PyTensorObject_diagonal, METH_VARARGS | METH_KEYWORDS, NULL}, {"addcmul", (PyCFunction)PyTensorObject_addcmul, METH_VARARGS | METH_KEYWORDS, NULL}, {"addcmul_", (PyCFunction)PyTensorObject_addcmul_, METH_VARARGS | METH_KEYWORDS, NULL}, - {"add_", (PyCFunction)PyTensorObject_add_, METH_VARARGS | METH_KEYWORDS, NULL}, - {"sub_", (PyCFunction)PyTensorObject_sub_, METH_VARARGS | METH_KEYWORDS, NULL}, + // {"add_", (PyCFunction)PyTensorObject_add_, METH_VARARGS | METH_KEYWORDS, NULL}, + // {"sub_", (PyCFunction)PyTensorObject_sub_, METH_VARARGS | METH_KEYWORDS, NULL}, {"matmul", (PyCFunction)PyTensorObject_matmul, METH_VARARGS | METH_KEYWORDS, NULL}, {"int", PyTensorObject_int, METH_NOARGS, NULL}, {"long", PyTensorObject_long, METH_NOARGS, NULL}, @@ -667,8 +641,8 @@ PyMethodDef PyTensorObject_extra_methods[] = { {"div_", (PyCFunction)PyTensorObject_div_, METH_VARARGS | METH_KEYWORDS, NULL}, {"mul", (PyCFunction)PyTensorObject_mul, METH_VARARGS | METH_KEYWORDS, NULL}, {"mul_", (PyCFunction)PyTensorObject_mul_, METH_VARARGS | METH_KEYWORDS, NULL}, - {"add", (PyCFunction)PyTensorObject_add, METH_VARARGS | METH_KEYWORDS, NULL}, - {"sub", (PyCFunction)PyTensorObject_sub, METH_VARARGS | METH_KEYWORDS, NULL}, + // {"add", (PyCFunction)PyTensorObject_add, METH_VARARGS | METH_KEYWORDS, NULL}, + // {"sub", 
(PyCFunction)PyTensorObject_sub, METH_VARARGS | METH_KEYWORDS, NULL}, {"fmod", (PyCFunction)PyTensorObject_fmod, METH_VARARGS | METH_KEYWORDS, NULL}, {"logical_and", (PyCFunction)PyTensorObject_logical_and, METH_VARARGS | METH_KEYWORDS, NULL}, {"logical_or", (PyCFunction)PyTensorObject_logical_or, METH_VARARGS | METH_KEYWORDS, NULL}, diff --git a/python/oneflow/framework/tensor.py b/python/oneflow/framework/tensor.py index 17b903191f8..8b6e93ebddb 100755 --- a/python/oneflow/framework/tensor.py +++ b/python/oneflow/framework/tensor.py @@ -1135,6 +1135,10 @@ def _cumprod(self, dim, dtype=None): def RegisterMethods(): Tensor.ndim = property(_ndim) Tensor.numpy = _numpy + Tensor.add = _add + Tensor.add_ = _add_inplace + Tensor.sub = _sub + Tensor.sub_ = _sub_inplace Tensor.backward = _backward Tensor.__setitem__ = _setitem Tensor.__str__ = _str From 32715a27dba9d286e505a43a8aaa4185011ffada Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Thu, 2 Jun 2022 15:58:21 +0800 Subject: [PATCH 43/50] add unittest for tensor.half tensor.add tensor.add_ --- .../oneflow/test/modules/test_tensor_ops.py | 14 +++++++++++ .../oneflow/test/tensor/test_tensor_part_1.py | 24 +++++++++++++++++++ 2 files changed, 38 insertions(+) diff --git a/python/oneflow/test/modules/test_tensor_ops.py b/python/oneflow/test/modules/test_tensor_ops.py index 2894c5373ea..07d3252a614 100644 --- a/python/oneflow/test/modules/test_tensor_ops.py +++ b/python/oneflow/test/modules/test_tensor_ops.py @@ -168,6 +168,20 @@ def test_int_0dim(test_case): y = x.int() return y + @autotest(n=20, auto_backward=False, rtol=1e-4, atol=1e-4, check_graph=True) + def test_half(test_case): + device = random_device() + x = random_tensor(dtype=int).to(device) + y = x.half() + return y + + @autotest(n=20, auto_backward=False, rtol=1e-4, atol=1e-4, check_graph=True) + def test_half_0dim(test_case): + device = random_device() + x = random_tensor(ndim=0, dtype=int).to(device) + y = x.half() + return y + @autotest(n=20, auto_backward=False, rtol=1e-4, atol=1e-4, check_graph=True) def test_float(test_case): device = random_device() diff --git a/python/oneflow/test/tensor/test_tensor_part_1.py b/python/oneflow/test/tensor/test_tensor_part_1.py index d335ddfeb33..55da4a4a373 100644 --- a/python/oneflow/test/tensor/test_tensor_part_1.py +++ b/python/oneflow/test/tensor/test_tensor_part_1.py @@ -581,6 +581,30 @@ def test_broadcast_div_inplace_tensor(test_case): y.div_(x) return y + @flow.unittest.skip_unless_1n1d() + @autotest(check_graph=True) + def test_add_inplace_tensor(test_case): + device = random_device() + rand_tensor = random_tensor( + low=-2, high=2, ndim=4, dim0=6, dim1=9, dim2=14, dim3=17 + ).to(device) + y = rand_tensor + 1 + x = random_tensor(low=-2, high=2, ndim=4, dim0=6, dim1=9, dim2=14, dim3=17).to( + device + ) + y.add_(x) + return y + + @flow.unittest.skip_unless_1n1d() + @autotest(check_graph=True) + def test_broadcast_add_inplace_tensor(test_case): + device = random_device() + rand_tensor = random_tensor(ndim=3, dim0=5, dim1=9, dim2=23).to(device) + y = rand_tensor + 1 + x = random_tensor(ndim=2, dim0=9, dim1=23).to(device) + y.add_(x) + return y + @flow.unittest.skip_unless_1n1d() @autotest(check_graph=True) def test_sub_inplace_tensor(test_case): From 3622f6833cd49080280a194fe6efbc9b70679ca6 Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Thu, 2 Jun 2022 16:05:32 +0800 Subject: [PATCH 44/50] refine code --- oneflow/api/python/framework/tensor_functions.cpp | 4 ---- python/oneflow/framework/tensor.py | 2 -- 2 files changed, 6 deletions(-) diff 
--git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index 1e8fa507186..fb4d945becb 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -612,8 +612,6 @@ PyMethodDef PyTensorObject_extra_methods[] = { {"diagonal", (PyCFunction)PyTensorObject_diagonal, METH_VARARGS | METH_KEYWORDS, NULL}, {"addcmul", (PyCFunction)PyTensorObject_addcmul, METH_VARARGS | METH_KEYWORDS, NULL}, {"addcmul_", (PyCFunction)PyTensorObject_addcmul_, METH_VARARGS | METH_KEYWORDS, NULL}, - // {"add_", (PyCFunction)PyTensorObject_add_, METH_VARARGS | METH_KEYWORDS, NULL}, - // {"sub_", (PyCFunction)PyTensorObject_sub_, METH_VARARGS | METH_KEYWORDS, NULL}, {"matmul", (PyCFunction)PyTensorObject_matmul, METH_VARARGS | METH_KEYWORDS, NULL}, {"int", PyTensorObject_int, METH_NOARGS, NULL}, {"long", PyTensorObject_long, METH_NOARGS, NULL}, @@ -641,8 +639,6 @@ PyMethodDef PyTensorObject_extra_methods[] = { {"div_", (PyCFunction)PyTensorObject_div_, METH_VARARGS | METH_KEYWORDS, NULL}, {"mul", (PyCFunction)PyTensorObject_mul, METH_VARARGS | METH_KEYWORDS, NULL}, {"mul_", (PyCFunction)PyTensorObject_mul_, METH_VARARGS | METH_KEYWORDS, NULL}, - // {"add", (PyCFunction)PyTensorObject_add, METH_VARARGS | METH_KEYWORDS, NULL}, - // {"sub", (PyCFunction)PyTensorObject_sub, METH_VARARGS | METH_KEYWORDS, NULL}, {"fmod", (PyCFunction)PyTensorObject_fmod, METH_VARARGS | METH_KEYWORDS, NULL}, {"logical_and", (PyCFunction)PyTensorObject_logical_and, METH_VARARGS | METH_KEYWORDS, NULL}, {"logical_or", (PyCFunction)PyTensorObject_logical_or, METH_VARARGS | METH_KEYWORDS, NULL}, diff --git a/python/oneflow/framework/tensor.py b/python/oneflow/framework/tensor.py index 8b6e93ebddb..1ef34bc3dc1 100755 --- a/python/oneflow/framework/tensor.py +++ b/python/oneflow/framework/tensor.py @@ -1193,8 +1193,6 @@ def RegisterMethods(): Tensor.topk = _topk Tensor.nms = _nms Tensor.nonzero = _nonzero - # Tensor.sum = _sum - # Tensor.mean = _mean Tensor.prod = _prod Tensor.is_consistent = _is_consistent Tensor.to_consistent = _to_consistent From 0f2fa6f073172a582c259b81e535786306368ab6 Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Thu, 2 Jun 2022 16:08:12 +0800 Subject: [PATCH 45/50] refine code --- oneflow/api/python/framework/tensor_functions.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp index fb4d945becb..6a8aa8e33e0 100644 --- a/oneflow/api/python/framework/tensor_functions.cpp +++ b/oneflow/api/python/framework/tensor_functions.cpp @@ -241,8 +241,6 @@ DIRECT_PASS_FUNC(PyTensorObject_div, functional::div) DIRECT_PASS_FUNC(PyTensorObject_div_, functional::div_) DIRECT_PASS_FUNC(PyTensorObject_mul, functional::mul) DIRECT_PASS_FUNC(PyTensorObject_mul_, functional::mul_) -// DIRECT_PASS_FUNC(PyTensorObject_add, functional::add) -// DIRECT_PASS_FUNC(PyTensorObject_sub, functional::sub) DIRECT_PASS_FUNC(PyTensorObject_fmod, functional::fmod) DIRECT_PASS_FUNC(PyTensorObject_logical_and, functional::logical_and) DIRECT_PASS_FUNC(PyTensorObject_logical_or, functional::logical_or) From a428c7b68dd3489406698590fe5657c466e7aff0 Mon Sep 17 00:00:00 2001 From: Wang Yi Date: Thu, 2 Jun 2022 16:16:53 +0800 Subject: [PATCH 46/50] fix typo --- python/oneflow/framework/tensor.py | 1 - 1 file changed, 1 deletion(-) diff --git a/python/oneflow/framework/tensor.py b/python/oneflow/framework/tensor.py index efc3c580a25..8c97c973596 100755 --- 
a/python/oneflow/framework/tensor.py
+++ b/python/oneflow/framework/tensor.py
@@ -1183,7 +1183,6 @@ def RegisterMethods():
     Tensor.repeat_interleave = _repeat_interleave
     Tensor.tile = _tile
     Tensor.split = _split
-    Tensor.narrow = _narrow
     Tensor.to = _to
     Tensor.gather = _gather
     Tensor.T = property(_T)

From b07e070603c207aa156a9b5ad73d407139b76c92 Mon Sep 17 00:00:00 2001
From: Wang Yi
Date: Tue, 7 Jun 2022 13:27:17 +0800
Subject: [PATCH 47/50] fix bug of tensor.std()

---
 oneflow/core/functional/impl/math_functor.cpp | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/oneflow/core/functional/impl/math_functor.cpp b/oneflow/core/functional/impl/math_functor.cpp
index 23aace93978..453546b630f 100644
--- a/oneflow/core/functional/impl/math_functor.cpp
+++ b/oneflow/core/functional/impl/math_functor.cpp
@@ -1953,7 +1953,12 @@ class StandardDeviationFunctor {
   Maybe<Tensor> operator()(const std::shared_ptr<one::Tensor>& input,
                            const Optional<std::vector<int32_t>>& dim,
                            const Optional<bool>& unbiased, const Optional<bool>& keepdim) const {
-    std::vector<int32_t> axis = *JUST(CheckAxis(*JUST(dim), input->ndim()));
+    std::vector<int32_t> axis;
+    if(!dim) {
+      for (int i = 0; i < input->ndim(); i++) { axis.emplace_back(i); }
+    } else {
+      axis = *JUST(CheckAxis(*JUST(dim), input->ndim()));
+    }
     bool unbias = true;
     bool keepdims = false;
     if (unbiased.has_value()) { unbias = JUST(unbiased); }

From 5bdd6b23baf9a2bf90d733efd4a62e83a14ed6dd Mon Sep 17 00:00:00 2001
From: Wang Yi
Date: Tue, 7 Jun 2022 13:58:10 +0800
Subject: [PATCH 48/50] refactor var std and cuda, using c++ functional api

---
 .../api/python/framework/tensor_functions.cpp | 87 +++++++++++++------
 oneflow/core/functional/impl/math_functor.cpp |  2 +-
 2 files changed, 63 insertions(+), 26 deletions(-)

diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp
index 6a8aa8e33e0..b2f3e344463 100644
--- a/oneflow/api/python/framework/tensor_functions.cpp
+++ b/oneflow/api/python/framework/tensor_functions.cpp
@@ -440,44 +440,81 @@ static PyObject* PyTensorObject_cuda(PyObject* self, PyObject* args, PyObject* k
                                    &device_obj)) {
     return NULL;
   }
-  PyObjectPtr dict(PyDict_New());
+  auto tensor = PyTensor_Unpack(self);
+  if (functional::PyDeviceCheck(device_obj)) {
+    Optional<Symbol<Device>> device = functional::PyUnpackDevice(device_obj);
+    return PyTensor_New(ASSERT_PTR(functional::To(tensor, device, NullOpt, false)));
+  }
+  Optional<std::string> device_str;
   if (device_obj == Py_None) {
-    device_obj = PyUnicode_FromString("cuda");
+    device_str = "cuda";
   } else if (PyLong_Check(device_obj)) {
-    std::string device_str = "cuda:" + std::to_string(PyLong_AsLongLong(device_obj));
-    device_obj = PyUnicode_FromString(device_str.c_str());
+    device_str = "cuda:" + std::to_string(PyLong_AsLongLong(device_obj));
   }
-  CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "device", device_obj) > -1);
-  PyObjectPtr tuple(PyTuple_Pack(1, self));
-  PyObject* result = functional::to(NULL, tuple.get(), dict.get());
-  if (PyErr_Occurred()) { throw py::error_already_set(); }
-  return result;
+  return PyTensor_New(ASSERT_PTR(functional::To(tensor, device_str, tensor->dtype(), false)));
   END_HANDLE_ERRORS
 }
 
 static PyObject* PyTensorObject_var(PyObject* self, PyObject* args, PyObject* kwargs) {
   HANDLE_ERRORS
-  PyObjectPtr dict(PyDict_New());
-  CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "unbiased", Py_True) > -1);
-  CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "keepdim", Py_False) > -1);
-  if (kwargs != NULL) { CHECK_OR_THROW(PyDict_Merge(dict.get(), kwargs, 1) > -1); }
-  PyObjectPtr concat_args(concat_self(self, args));
-  PyObject* result = functional::var(NULL, concat_args.get(), dict.get());
-  if (PyErr_Occurred()) { throw py::error_already_set(); }
-  return result;
+  PyObject* dim_obj = Py_None;
+  PyObject* unbiased_obj = Py_True;
+  PyObject* keepdim_obj = Py_False;
+  static const char* keywords[4] = {"dim", "unbiased", "keepdim", NULL};
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO!O!:var", const_cast<char**>(keywords),
+                                   &dim_obj, &PyBool_Type, &unbiased_obj, &PyBool_Type,
+                                   &keepdim_obj)) {
+    return NULL;
+  }
+  bool unbiased = unbiased_obj == Py_True;
+  bool keepdim = keepdim_obj == Py_True;
+  CHECK_OR_THROW(dim_obj == Py_None || PyLong_Check(dim_obj)
+                 || functional::PyLongSequenceCheck(dim_obj))
+      << Error::TypeError() << "var(): argument 'dim' must be int32 list, not "
+      << functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(dim_obj)));
+  auto tensor = PyTensor_Unpack(self);
+  if (dim_obj == Py_None) {
+    return PyTensor_New(ASSERT_PTR(functional::Variance(tensor, NullOpt, unbiased, keepdim)));
+  }
+  std::vector<int32_t> dim;
+  if (PyLong_Check(dim_obj)) {
+    dim.emplace_back(static_cast<int32_t>(PyLong_AsLong(dim_obj)));
+    return PyTensor_New(ASSERT_PTR(functional::Variance(tensor, dim, unbiased, keepdim)));
+  }
+  dim = functional::PyUnpackLongSequence<int32_t>(dim_obj);
+  return PyTensor_New(ASSERT_PTR(functional::Variance(tensor, dim, unbiased, keepdim)));
   END_HANDLE_ERRORS
 }
 
 static PyObject* PyTensorObject_std(PyObject* self, PyObject* args, PyObject* kwargs) {
   HANDLE_ERRORS
-  PyObjectPtr dict(PyDict_New());
-  CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "unbiased", Py_True) > -1);
-  CHECK_OR_THROW(PyDict_SetItemString(dict.get(), "keepdim", Py_False) > -1);
-  if (kwargs != NULL) { CHECK_OR_THROW(PyDict_Merge(dict.get(), kwargs, 1) > -1); }
-  PyObjectPtr concat_args(concat_self(self, args));
-  PyObject* result = functional::std(NULL, concat_args.get(), dict.get());
-  if (PyErr_Occurred()) { throw py::error_already_set(); }
-  return result;
+  PyObject* dim_obj = Py_None;
+  PyObject* unbiased_obj = Py_True;
+  PyObject* keepdim_obj = Py_False;
+  static const char* keywords[4] = {"dim", "unbiased", "keepdim", NULL};
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OO!O!:std", const_cast<char**>(keywords),
+                                   &dim_obj, &PyBool_Type, &unbiased_obj, &PyBool_Type,
+                                   &keepdim_obj)) {
+    return NULL;
+  }
+  bool unbiased = unbiased_obj == Py_True;
+  bool keepdim = keepdim_obj == Py_True;
+  CHECK_OR_THROW(dim_obj == Py_None || PyLong_Check(dim_obj)
+                 || functional::PyLongSequenceCheck(dim_obj))
+      << Error::TypeError() << "std(): argument 'dim' must be int32 list, not "
+      << functional::PyStringAsString(PyObject_Str((PyObject*)Py_TYPE(dim_obj)));
+  auto tensor = PyTensor_Unpack(self);
+  if (dim_obj == Py_None) {
+    return PyTensor_New(
+        ASSERT_PTR(functional::StandardDeviation(tensor, NullOpt, unbiased, keepdim)));
+  }
+  std::vector<int32_t> dim;
+  if (PyLong_Check(dim_obj)) {
+    dim.emplace_back(static_cast<int32_t>(PyLong_AsLong(dim_obj)));
+    return PyTensor_New(ASSERT_PTR(functional::StandardDeviation(tensor, dim, unbiased, keepdim)));
+  }
+  dim = functional::PyUnpackLongSequence<int32_t>(dim_obj);
+  return PyTensor_New(ASSERT_PTR(functional::StandardDeviation(tensor, dim, unbiased, keepdim)));
   END_HANDLE_ERRORS
 }
 
diff --git a/oneflow/core/functional/impl/math_functor.cpp b/oneflow/core/functional/impl/math_functor.cpp
index 453546b630f..c200acb3c38 100644
--- a/oneflow/core/functional/impl/math_functor.cpp
+++ b/oneflow/core/functional/impl/math_functor.cpp
@@ -1954,7 +1954,7 @@ class StandardDeviationFunctor {
                            const Optional<std::vector<int32_t>>& dim,
                            const Optional<bool>& unbiased, const Optional<bool>& keepdim) const {
     std::vector<int32_t> axis;
-    if(!dim) {
+    if (!dim) {
       for (int i = 0; i < input->ndim(); i++) { axis.emplace_back(i); }
     } else {
       axis = *JUST(CheckAxis(*JUST(dim), input->ndim()));

From 82d51accc4932ee8d8fcfff7fea92ea7d5f90445 Mon Sep 17 00:00:00 2001
From: Wang Yi
Date: Tue, 7 Jun 2022 19:52:56 +0800
Subject: [PATCH 49/50] add beta and threshold in softplus

---
 oneflow/api/python/framework/tensor_functions.cpp | 15 +++++++++------
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp
index b2f3e344463..4e06a1bbd0f 100644
--- a/oneflow/api/python/framework/tensor_functions.cpp
+++ b/oneflow/api/python/framework/tensor_functions.cpp
@@ -518,12 +518,15 @@ static PyObject* PyTensorObject_std(PyObject* self, PyObject* args, PyObject* kw
   END_HANDLE_ERRORS
 }
 
-static PyObject* PyTensorObject_softplus(PyObject* self, PyObject* unused) {
+static PyObject* PyTensorObject_softplus(PyObject* self, PyObject* args, PyObject* kwargs) {
   HANDLE_ERRORS
-  PyObjectPtr concat_args(PyTuple_Pack(1, self));
-  PyObject* result = functional::softplus(NULL, concat_args.get(), NULL);
-  if (PyErr_Occurred()) { throw py::error_already_set(); }
-  return result;
+  double beta = 1.0;
+  double threshold = 20.0;
+  static const char* keywords[3] = {"beta", "threshold", NULL};
+  if(!PyArg_ParseTupleAndKeywords(args, kwargs, "dd:softplus", const_cast<char**>(keywords), &beta, &threshold)) {
+    return NULL;
+  }
+  return PyTensor_New(ASSERT_PTR(functional::Softplus(PyTensor_Unpack(self), beta, threshold)));
   END_HANDLE_ERRORS
 }
 
@@ -657,7 +660,7 @@ PyMethodDef PyTensorObject_extra_methods[] = {
     {"cuda", (PyCFunction)PyTensorObject_cuda, METH_VARARGS | METH_KEYWORDS, NULL},
     {"var", (PyCFunction)PyTensorObject_var, METH_VARARGS | METH_KEYWORDS, NULL},
     {"std", (PyCFunction)PyTensorObject_std, METH_VARARGS | METH_KEYWORDS, NULL},
-    {"softplus", PyTensorObject_softplus, METH_NOARGS, NULL},
+    {"softplus", (PyCFunction)PyTensorObject_softplus, METH_VARARGS | METH_KEYWORDS, NULL},
    {"relu", PyTensorObject_relu, METH_NOARGS, NULL},
    {"relu_", PyTensorObject_relu_, METH_NOARGS, NULL},
    {"all", (PyCFunction)PyTensorObject_all, METH_VARARGS | METH_KEYWORDS, NULL},

From 1f1e1a7a2084a4fbc7dbb63d304859e79156b10a Mon Sep 17 00:00:00 2001
From: oneflow-ci-bot
Date: Tue, 7 Jun 2022 11:54:29 +0000
Subject: [PATCH 50/50] auto format by CI

---
 oneflow/api/python/framework/tensor_functions.cpp | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/oneflow/api/python/framework/tensor_functions.cpp b/oneflow/api/python/framework/tensor_functions.cpp
index 4e06a1bbd0f..268c877dee8 100644
--- a/oneflow/api/python/framework/tensor_functions.cpp
+++ b/oneflow/api/python/framework/tensor_functions.cpp
@@ -523,7 +523,8 @@ static PyObject* PyTensorObject_softplus(PyObject* self, PyObject* args, PyObjec
   double beta = 1.0;
   double threshold = 20.0;
   static const char* keywords[3] = {"beta", "threshold", NULL};
-  if(!PyArg_ParseTupleAndKeywords(args, kwargs, "dd:softplus", const_cast<char**>(keywords), &beta, &threshold)) {
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "dd:softplus", const_cast<char**>(keywords), &beta,
+                                   &threshold)) {
     return NULL;
   }
   return PyTensor_New(ASSERT_PTR(functional::Softplus(PyTensor_Unpack(self), beta, threshold)));
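
A closing usage sketch for the final softplus binding (illustrative only; note
that the "dd:softplus" format string above carries no '|' marker, so as written
both doubles must be supplied explicitly even though defaults are declared):

    import oneflow as flow

    x = flow.tensor([-1.0, 0.0, 1.0])
    # softplus(x) = (1 / beta) * log(1 + exp(beta * x)), linear above threshold
    y = x.softplus(beta=1.0, threshold=20.0)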