Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Dev support char and short dtype #10086

Merged
merged 13 commits (source and target branch names not captured in this page snapshot)
Apr 9, 2023
2 changes: 2 additions & 0 deletions oneflow/api/python/framework/dtype.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,8 @@ ONEFLOW_API_PYBIND11_MODULE("", m) {
m.attr("cfloat") = &CHECK_JUST(DType::Get(DataType::kComplex64));
m.attr("complex128") = &CHECK_JUST(DType::Get(DataType::kComplex128));
m.attr("cdouble") = &CHECK_JUST(DType::Get(DataType::kComplex128));
m.attr("char") = &CHECK_JUST(DType::Get(DataType::kChar));
m.attr("short") = &CHECK_JUST(DType::Get(DataType::kInt16));

py::options options;
options.disable_function_signatures();
Expand Down
3 changes: 2 additions & 1 deletion oneflow/api/python/framework/tensor.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -394,7 +394,8 @@ static PyObject* PyTensorObject_to_numpy(PyObject* self, PyObject* unused) {
switch (data_type) {
#define SWITCH_EAGER_TENSOR_TO_NUMPY(cpp_type, of_type) \
case of_type: return ASSERT(EagerLocalTensorToNumpy<cpp_type>(self));
OF_PP_FOR_EACH_TUPLE(SWITCH_EAGER_TENSOR_TO_NUMPY, POD_DATA_TYPE_SEQ COMPLEX_DATA_TYPE_SEQ)
OF_PP_FOR_EACH_TUPLE(SWITCH_EAGER_TENSOR_TO_NUMPY,
POD_DATA_TYPE_SEQ INT16_DATA_TYPE_SEQ COMPLEX_DATA_TYPE_SEQ)
case DataType::kFloat16: return ASSERT(EagerLocalTensorToNumpy<float16>(self));
default: {
return PyErr_Format(PyExc_RuntimeError,
Expand Down
2 changes: 2 additions & 0 deletions oneflow/api/python/framework/tensortype.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -58,6 +58,8 @@ static const std::unordered_map<Symbol<DType>, std::string> all_data_types = {
{DType::Complex32(), "ComplexHalfTensor"},
{DType::Complex64(), "ComplexFloatTensor"},
{DType::Complex128(), "ComplexDoubleTensor"},
{DType::Char(), "CharTensor"},
{DType::Int16(), "ShortTensor"},
};

static const std::string get_dtype_string(PyTensorType* tensortype) {
Expand Down
2 changes: 2 additions & 0 deletions oneflow/api/python/functional/python_arg.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -212,6 +212,8 @@ bool PythonArg::TypeCheck(ValueType type) const {
if (tag_ == HAS_DEFAULT) { return default_val_->value_type() == type; }
switch (type) {
case kINT32:
case kINT16:
case kChar:
case kUINT32:
case kINT64:
case kUINT64:
Expand Down
3 changes: 2 additions & 1 deletion oneflow/api/python/functional/value_types.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,8 @@ HashMap<ValueType, std::string>* GetValueTypeNameMap() {
{kMEMORYFORMAT, "memory format"},
{kCOMPLEX_FLOAT, "complex float"},
{kCOMPLEX_DOUBLE, "complex double"},
};
{kCHAR, "char"},
{kINT16, "int16"}};
return &value_type_name_map;
}

Expand Down
4 changes: 4 additions & 0 deletions oneflow/api/python/functional/value_types.h
Original file line number Diff line number Diff line change
Expand Up @@ -118,6 +118,8 @@ enum ValueType : int {
// Complex
kCOMPLEX_FLOAT,
kCOMPLEX_DOUBLE,
kCHAR,
kINT16
};

#define VALUE_TYPE_OF_IMPL(cpp_type, value_type) \
Expand All @@ -133,6 +135,8 @@ enum ValueType : int {

VALUE_TYPE_OF_IMPL(void, kVOID);
VALUE_TYPE_OF_IMPL(int32_t, kINT32);
VALUE_TYPE_OF_IMPL(int16_t, kINT16);
VALUE_TYPE_OF_IMPL(char, kCHAR);
VALUE_TYPE_OF_IMPL(uint32_t, kUINT32);
VALUE_TYPE_OF_IMPL(int64_t, kINT64);
VALUE_TYPE_OF_IMPL(uint64_t, kUINT64);
Expand Down
3 changes: 2 additions & 1 deletion oneflow/core/common/data_type.h
Original file line number Diff line number Diff line change
Expand Up @@ -149,7 +149,8 @@ struct GetDataType<void> : std::integral_constant<DataType, DataType::kChar> {};
inline type_cpp GetTypeByDataType(std::integral_constant<DataType, type_proto>) { return {}; }
OF_PP_FOR_EACH_TUPLE(SPECIALIZE_GET_DATA_TYPE,
ALL_DATA_TYPE_SEQ UNSIGNED_INT32_DATA_TYPE_SEQ FLOAT16_DATA_TYPE_SEQ
BFLOAT16_DATA_TYPE_SEQ COMPLEX_DATA_TYPE_SEQ UNSIGNED_INT64_DATA_TYPE_SEQ);
BFLOAT16_DATA_TYPE_SEQ COMPLEX_DATA_TYPE_SEQ UNSIGNED_INT64_DATA_TYPE_SEQ
INT16_DATA_TYPE_SEQ);
#undef SPECIALIZE_GET_DATA_TYPE

template<typename T>
Expand Down
1 change: 1 addition & 0 deletions oneflow/core/common/data_type_seq.h
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ limitations under the License.
OF_PP_MAKE_TUPLE_SEQ(int32_t, DataType::kInt32) \
OF_PP_MAKE_TUPLE_SEQ(int64_t, DataType::kInt64)

// int16 is deliberately kept in its own sequence rather than being added to
// SIGNED_INT_DATA_TYPE_SEQ: per the PR discussion, folding it into that seq
// makes some kernel macro expansions fail, so it is handled separately for now.
// TODO: consolidate once the data-type seq macros are cleaned up.
#define INT16_DATA_TYPE_SEQ OF_PP_MAKE_TUPLE_SEQ(int16_t, DataType::kInt16)
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why isn't this added to SIGNED_INT_DATA_TYPE_SEQ here?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This needs a cleanup later — the macros are already in bad shape. If int16 is added into SIGNED_INT_DATA_TYPE_SEQ, some kernel macro expansions fail, so it is handled separately for now.

#define UNSIGNED_INT_DATA_TYPE_SEQ OF_PP_MAKE_TUPLE_SEQ(uint8_t, DataType::kUInt8)
#define UNSIGNED_INT32_DATA_TYPE_SEQ OF_PP_MAKE_TUPLE_SEQ(uint32_t, DataType::kUInt32)
#define UNSIGNED_INT64_DATA_TYPE_SEQ OF_PP_MAKE_TUPLE_SEQ(uint64_t, DataType::kUInt64)
Expand Down
2 changes: 2 additions & 0 deletions oneflow/core/ep/cpu/primitive/type_seq.h
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@ limitations under the License.
#define CPU_PRIMITIVE_BOOL_TYPE_SEQ OF_PP_MAKE_TUPLE_SEQ(bool, DataType::kBool)
#define CPU_PRIMITIVE_CHAR_TYPE_SEQ OF_PP_MAKE_TUPLE_SEQ(char, DataType::kChar)
#define CPU_PRIMITIVE_INT8_TYPE_SEQ OF_PP_MAKE_TUPLE_SEQ(int8_t, DataType::kInt8)
#define CPU_PRIMITIVE_INT16_TYPE_SEQ OF_PP_MAKE_TUPLE_SEQ(int16_t, DataType::kInt16)
#define CPU_PRIMITIVE_UINT8_TYPE_SEQ OF_PP_MAKE_TUPLE_SEQ(uint8_t, DataType::kUInt8)
#define CPU_PRIMITIVE_INT32_TYPE_SEQ OF_PP_MAKE_TUPLE_SEQ(int32_t, DataType::kInt32)
#define CPU_PRIMITIVE_UINT32_TYPE_SEQ OF_PP_MAKE_TUPLE_SEQ(uint32_t, DataType::kUInt32)
Expand Down Expand Up @@ -60,6 +61,7 @@ limitations under the License.
CPU_PRIMITIVE_BOOL_TYPE_SEQ \
CPU_PRIMITIVE_CHAR_TYPE_SEQ \
CPU_PRIMITIVE_INT8_TYPE_SEQ \
CPU_PRIMITIVE_INT16_TYPE_SEQ \
CPU_PRIMITIVE_UINT8_TYPE_SEQ \
CPU_PRIMITIVE_INT32_TYPE_SEQ \
CPU_PRIMITIVE_UINT32_TYPE_SEQ \
Expand Down
2 changes: 2 additions & 0 deletions oneflow/core/ep/cuda/primitive/type_seq.h
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ limitations under the License.
#define CUDA_PRIMITIVE_CHAR_TYPE_SEQ OF_PP_MAKE_TUPLE_SEQ(char, DataType::kChar)
#define CUDA_PRIMITIVE_INT8_TYPE_SEQ OF_PP_MAKE_TUPLE_SEQ(int8_t, DataType::kInt8)
#define CUDA_PRIMITIVE_UINT8_TYPE_SEQ OF_PP_MAKE_TUPLE_SEQ(uint8_t, DataType::kUInt8)
#define CUDA_PRIMITIVE_INT16_TYPE_SEQ OF_PP_MAKE_TUPLE_SEQ(int16_t, DataType::kInt16)
#define CUDA_PRIMITIVE_INT32_TYPE_SEQ OF_PP_MAKE_TUPLE_SEQ(int32_t, DataType::kInt32)
#define CUDA_PRIMITIVE_UINT32_TYPE_SEQ OF_PP_MAKE_TUPLE_SEQ(uint32_t, DataType::kUInt32)
#define CUDA_PRIMITIVE_INT64_TYPE_SEQ OF_PP_MAKE_TUPLE_SEQ(int64_t, DataType::kInt64)
Expand All @@ -54,6 +55,7 @@ limitations under the License.
CUDA_PRIMITIVE_CHAR_TYPE_SEQ \
CUDA_PRIMITIVE_INT8_TYPE_SEQ \
CUDA_PRIMITIVE_UINT8_TYPE_SEQ \
CUDA_PRIMITIVE_INT16_TYPE_SEQ \
CUDA_PRIMITIVE_INT32_TYPE_SEQ \
CUDA_PRIMITIVE_INT64_TYPE_SEQ \
CUDA_PRIMITIVE_FLOAT_TYPE_SEQ \
Expand Down
3 changes: 3 additions & 0 deletions oneflow/extension/python/numpy.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,8 @@ Maybe<int> OFDataTypeToNumpyType(DataType of_data_type) {
case DataType::kFloat: return NPY_FLOAT32;
case DataType::kDouble: return NPY_FLOAT64;
case DataType::kInt8: return NPY_INT8;
case DataType::kInt16: return NPY_INT16;
case DataType::kChar: return NPY_INT8;
case DataType::kInt32: return NPY_INT32;
case DataType::kInt64: return NPY_INT64;
case DataType::kUInt8: return NPY_UINT8;
Expand All @@ -61,6 +63,7 @@ Maybe<DataType> NumpyTypeToOFDataType(int np_type) {
case NPY_FLOAT32: return DataType::kFloat;
case NPY_FLOAT64: return DataType::kDouble;
case NPY_INT8: return DataType::kInt8;
case NPY_INT16: return DataType::kInt16;
case NPY_INT32: return DataType::kInt32;
case NPY_INT64:
case NPY_LONGLONG: return DataType::kInt64;
Expand Down
3 changes: 3 additions & 0 deletions python/oneflow/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,6 +52,9 @@
locals()["record"] = oneflow._oneflow_internal.record
locals()["tensor_buffer"] = oneflow._oneflow_internal.tensor_buffer
locals()["bfloat16"] = oneflow._oneflow_internal.bfloat16
locals()["char"] = oneflow._oneflow_internal.char
locals()["short"] = oneflow._oneflow_internal.int16
locals()["int16"] = oneflow._oneflow_internal.int16

locals()["cfloat"] = oneflow._oneflow_internal.cfloat
locals()["complex64"] = oneflow._oneflow_internal.complex64
Expand Down
4 changes: 4 additions & 0 deletions python/oneflow/framework/dtype.py
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,8 @@
oneflow.float64,
oneflow.float16,
oneflow.int8,
oneflow.char,
oneflow.int16,
oneflow.int32,
oneflow.int64,
oneflow.uint8,
Expand Down Expand Up @@ -63,6 +65,8 @@ def convert_proto_dtype_to_oneflow_dtype(proto_dtype):
oneflow.float64: np.double,
oneflow.double: np.double,
oneflow.int8: np.int8,
oneflow.char: np.int8,
oneflow.int16: np.int16,
oneflow.int32: np.int32,
oneflow.int64: np.int64,
oneflow.uint8: np.uint8,
Expand Down
8 changes: 8 additions & 0 deletions python/oneflow/test/modules/test_constant.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,14 @@ def _test_different_dtype(test_case, device, shape):
test_case.assertTrue(np.array_equal(np.ones(shape, dtype=np.uint8), y2.numpy()))
y3 = flow.ones(shape, dtype=flow.float64, device=flow.device(device))
test_case.assertTrue(np.array_equal(np.ones(shape, dtype=np.float64), y3.numpy()))
y4 = flow.ones(shape, dtype=flow.short, device=flow.device(device))
test_case.assertTrue(np.array_equal(np.ones(shape, dtype=np.short), y4.numpy()))
y5 = flow.ones(shape, dtype=flow.int16, device=flow.device(device))
test_case.assertTrue(np.array_equal(np.ones(shape, dtype=np.int16), y5.numpy()))
y6 = flow.ones(shape, dtype=flow.char, device=flow.device(device))
test_case.assertTrue(np.array_equal(np.ones(shape, dtype=np.int8), y6.numpy()))
y7 = flow.ones(shape, dtype=flow.int8, device=flow.device(device))
test_case.assertTrue(np.array_equal(np.ones(shape, dtype=np.int8), y7.numpy()))


@flow.unittest.skip_unless_1n1d()
Expand Down