Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

update dpnp.vdot implementation #1692

Merged
merged 4 commits into from
Feb 7, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions dpnp/backend/extensions/blas/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -28,6 +28,7 @@ set(python_module_name _blas_impl)
set(_module_src
${CMAKE_CURRENT_SOURCE_DIR}/blas_py.cpp
${CMAKE_CURRENT_SOURCE_DIR}/dot.cpp
${CMAKE_CURRENT_SOURCE_DIR}/dotc.cpp
${CMAKE_CURRENT_SOURCE_DIR}/dotu.cpp
${CMAKE_CURRENT_SOURCE_DIR}/gemm.cpp
${CMAKE_CURRENT_SOURCE_DIR}/gemm_batch.cpp
Expand Down
10 changes: 10 additions & 0 deletions dpnp/backend/extensions/blas/blas_py.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@ namespace py = pybind11;
void init_dispatch_tables(void)
{
blas_ext::init_dot_dispatch_table();
blas_ext::init_dotc_dispatch_table();
blas_ext::init_dotu_dispatch_table();
blas_ext::init_gemm_batch_dispatch_table();
blas_ext::init_gemm_dispatch_table();
Expand All @@ -57,6 +58,15 @@ PYBIND11_MODULE(_blas_impl, m)
py::arg("result"), py::arg("depends") = py::list());
}

{
    // NOTE: dotc is a BLAS level-1 routine (oneapi::mkl::blas::dotc),
    // not a LAPACK routine — the previous docstring named the wrong library.
    m.def("_dotc", &blas_ext::dotc,
          "Call `dotc` from OneMKL BLAS library to return "
          "the dot product of two complex vectors, "
          "conjugating the first vector.",
          py::arg("sycl_queue"), py::arg("vectorA"), py::arg("vectorB"),
          py::arg("result"), py::arg("depends") = py::list());
}

{
m.def("_dotu", &blas_ext::dotu,
"Call `dotu` from OneMKL LAPACK library to return "
Expand Down
8 changes: 8 additions & 0 deletions dpnp/backend/extensions/blas/dot.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,13 @@ extern std::pair<sycl::event, sycl::event>
dpctl::tensor::usm_ndarray result,
const std::vector<sycl::event> &depends);

// Dot product of two complex 1-d vectors with the first vector conjugated
// (oneapi::mkl::blas::dotc); the scalar result is written into a 0-d array.
// Returns a pair of events: (keep-args-alive event, computation event).
extern std::pair<sycl::event, sycl::event>
    dotc(sycl::queue &exec_q,
         dpctl::tensor::usm_ndarray vectorA,
         dpctl::tensor::usm_ndarray vectorB,
         dpctl::tensor::usm_ndarray result,
         const std::vector<sycl::event> &depends);

extern std::pair<sycl::event, sycl::event>
dotu(sycl::queue &exec_q,
dpctl::tensor::usm_ndarray vectorA,
Expand All @@ -53,6 +60,7 @@ extern std::pair<sycl::event, sycl::event>
const std::vector<sycl::event> &depends);

extern void init_dot_dispatch_table(void);
extern void init_dotc_dispatch_table(void);
extern void init_dotu_dispatch_table(void);
} // namespace blas
} // namespace ext
Expand Down
241 changes: 241 additions & 0 deletions dpnp/backend/extensions/blas/dotc.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,241 @@
//*****************************************************************************
// Copyright (c) 2024, Intel Corporation
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// - Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
// THE POSSIBILITY OF SUCH DAMAGE.
//*****************************************************************************

#include <pybind11/pybind11.h>

// dpctl tensor headers
#include "utils/memory_overlap.hpp"
#include "utils/type_utils.hpp"

#include "dot.hpp"
#include "types_matrix.hpp"

#include "dpnp_utils.hpp"

namespace dpnp
{
namespace backend
{
namespace ext
{
namespace blas
{
namespace mkl_blas = oneapi::mkl::blas;
namespace py = pybind11;
namespace type_utils = dpctl::tensor::type_utils;

// Signature of a type-specialized dotc kernel launcher:
// (queue, n, ptr A, stride A, ptr B, stride B, ptr result, dependencies)
// -> event of the submitted MKL computation.
typedef sycl::event (*dotc_impl_fn_ptr_t)(sycl::queue &,
                                          const std::int64_t,
                                          char *,
                                          const std::int64_t,
                                          char *,
                                          const std::int64_t,
                                          char *,
                                          const std::vector<sycl::event> &);

// Table mapping (input type id, output type id) -> implementation, or
// nullptr for unsupported combinations; filled by init_dotc_dispatch_table().
static dotc_impl_fn_ptr_t dotc_dispatch_table[dpctl_td_ns::num_types]
                                             [dpctl_td_ns::num_types];

/**
 * @brief Type-specialized launcher for oneapi::mkl::blas::dotc.
 *
 * @tparam Tab Element type of input vectors A and B.
 * @tparam Tc  Element type of the result scalar.
 *
 * Wraps the MKL call and converts any MKL/SYCL exception into
 * std::runtime_error with a descriptive message.
 */
template <typename Tab, typename Tc>
static sycl::event dotc_impl(sycl::queue &exec_q,
                             const std::int64_t n,
                             char *vectorA,
                             const std::int64_t stride_a,
                             char *vectorB,
                             const std::int64_t stride_b,
                             char *result,
                             const std::vector<sycl::event> &depends)
{
    // Reject element types the target device cannot handle (e.g. fp64 on
    // devices without double support) before submitting work.
    type_utils::validate_type_for_device<Tab>(exec_q);
    type_utils::validate_type_for_device<Tc>(exec_q);

    Tab *const a_ptr = reinterpret_cast<Tab *>(vectorA);
    Tab *const b_ptr = reinterpret_cast<Tab *>(vectorB);
    Tc *const res_ptr = reinterpret_cast<Tc *>(result);

    try {
        // n elements of a (stride_a) dotted with b (stride_b), conjugating a;
        // scalar lands in res_ptr.
        return mkl_blas::row_major::dotc(exec_q, n, a_ptr, stride_a, b_ptr,
                                         stride_b, res_ptr, depends);
    } catch (oneapi::mkl::exception const &e) {
        std::stringstream error_msg;
        error_msg
            << "Unexpected MKL exception caught during dotc() call:\nreason: "
            << e.what();
        throw std::runtime_error(error_msg.str());
    } catch (sycl::exception const &e) {
        std::stringstream error_msg;
        error_msg << "Unexpected SYCL exception caught during dotc() call:\n"
                  << e.what();
        throw std::runtime_error(error_msg.str());
    }
}

/**
 * @brief Compute the conjugated dot product of two 1-d vectors.
 *
 * Validates shapes, memory overlap, queue compatibility and types, then
 * dispatches to the type-specialized oneapi::mkl::blas::dotc launcher.
 *
 * @param exec_q   SYCL queue the kernel is submitted to.
 * @param vectorA  First input vector (1-d); it is conjugated by dotc.
 * @param vectorB  Second input vector (1-d), same size as vectorA.
 * @param result   0-d output array receiving the scalar result.
 * @param depends  Events the computation must wait on.
 * @return Pair of events: (keep-args-alive event, dotc computation event).
 * @throws py::value_error on invalid shapes, overlapping output,
 *         incompatible queues, size mismatch or unsupported types.
 */
std::pair<sycl::event, sycl::event>
    dotc(sycl::queue &exec_q,
         dpctl::tensor::usm_ndarray vectorA,
         dpctl::tensor::usm_ndarray vectorB,
         dpctl::tensor::usm_ndarray result,
         const std::vector<sycl::event> &depends)
{
    const int vectorA_nd = vectorA.get_ndim();
    const int vectorB_nd = vectorB.get_ndim();
    const int result_nd = result.get_ndim();

    // dotc is defined for 1-d inputs producing a 0-d (scalar) output.
    if (vectorA_nd != 1) {
        throw py::value_error(
            "The first input array has ndim=" + std::to_string(vectorA_nd) +
            ", but a 1-dimensional array is expected.");
    }

    if (vectorB_nd != 1) {
        throw py::value_error(
            "The second input array has ndim=" + std::to_string(vectorB_nd) +
            ", but a 1-dimensional array is expected.");
    }

    if (result_nd != 0) {
        throw py::value_error(
            "The output array has ndim=" + std::to_string(result_nd) +
            ", but a 0-dimensional array is expected.");
    }

    // The output must not alias either input; the inputs may alias each
    // other since both are only read.
    auto const &overlap = dpctl::tensor::overlap::MemoryOverlap();
    if (overlap(vectorA, result)) {
        throw py::value_error(
            "The first input array and output array are overlapping "
            "segments of memory");
    }
    if (overlap(vectorB, result)) {
        throw py::value_error(
            "The second input array and output array are overlapping "
            "segments of memory");
    }

    // check compatibility of execution queue and allocation queue
    if (!dpctl::utils::queues_are_compatible(
            exec_q,
            {vectorA.get_queue(), vectorB.get_queue(), result.get_queue()}))
    {
        throw py::value_error(
            "USM allocations are not compatible with the execution queue.");
    }

    py::ssize_t a_size = vectorA.get_size();
    py::ssize_t b_size = vectorB.get_size();
    if (a_size != b_size) {
        throw py::value_error("The size of the first input array must be "
                              "equal to the size of the second input array.");
    }

    std::vector<py::ssize_t> a_stride = vectorA.get_strides_vector();
    std::vector<py::ssize_t> b_stride = vectorB.get_strides_vector();

    const std::int64_t n = a_size;
    const std::int64_t str_a = a_stride[0];
    const std::int64_t str_b = b_stride[0];

    int vectorA_typenum = vectorA.get_typenum();
    int vectorB_typenum = vectorB.get_typenum();
    int result_typenum = result.get_typenum();

    // BLAS dotc has a single element type for both inputs; mixed-type
    // inputs must be cast by the caller.
    if (vectorA_typenum != vectorB_typenum) {
        // fixed: message previously read "must be of must be of"
        throw py::value_error("Input arrays must be of the same type.");
    }

    auto array_types = dpctl_td_ns::usm_ndarray_types();
    int vectorAB_type_id = array_types.typenum_to_lookup_id(vectorA_typenum);
    int result_type_id = array_types.typenum_to_lookup_id(result_typenum);

    dotc_impl_fn_ptr_t dotc_fn =
        dotc_dispatch_table[vectorAB_type_id][result_type_id];
    if (dotc_fn == nullptr) {
        throw py::value_error(
            "Types of input vectors and result array are mismatched.");
    }

    char *a_typeless_ptr = vectorA.get_data();
    char *b_typeless_ptr = vectorB.get_data();
    char *r_typeless_ptr = result.get_data();

    // BLAS expects the pointer to the first logical element; for a negative
    // stride usm_ndarray's data pointer is the highest-addressed element,
    // so rewind it to the start of the memory segment.
    const int a_elemsize = vectorA.get_elemsize();
    const int b_elemsize = vectorB.get_elemsize();
    if (str_a < 0) {
        a_typeless_ptr -= (n - 1) * std::abs(str_a) * a_elemsize;
    }
    if (str_b < 0) {
        b_typeless_ptr -= (n - 1) * std::abs(str_b) * b_elemsize;
    }

    sycl::event dotc_ev =
        dotc_fn(exec_q, n, a_typeless_ptr, str_a, b_typeless_ptr, str_b,
                r_typeless_ptr, depends);

    // Keep the arrays alive until the computation completes.
    sycl::event args_ev = dpctl::utils::keep_args_alive(
        exec_q, {vectorA, vectorB, result}, {dotc_ev});

    return std::make_pair(args_ev, dotc_ev);
}

/**
 * @brief Factory used by DispatchTableBuilder: yields the dotc launcher for
 * a supported (Tab, Tc) type pair and nullptr otherwise.
 */
template <typename fnT, typename Tab, typename Tc>
struct DotcContigFactory
{
    fnT get()
    {
        if constexpr (!types::DotcTypePairSupportFactory<Tab, Tc>::is_defined)
        {
            // Unsupported combination: leave a null entry in the table.
            return nullptr;
        }
        else {
            return dotc_impl<Tab, Tc>;
        }
    }
};

void init_dotc_dispatch_table(void)
{
dpctl_td_ns::DispatchTableBuilder<dotc_impl_fn_ptr_t, DotcContigFactory,
dpctl_td_ns::num_types>
contig;
contig.populate_dispatch_table(dotc_dispatch_table);
}
} // namespace blas
} // namespace ext
} // namespace backend
} // namespace dpnp
24 changes: 24 additions & 0 deletions dpnp/backend/extensions/blas/types_matrix.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,30 @@ struct DotTypePairSupportFactory
dpctl_td_ns::NotDefinedEntry>::is_defined;
};

/**
 * @brief A factory to define pairs of supported types for which
 * MKL BLAS library provides support in oneapi::mkl::blas::dotc<Tab, Tc>
 * function.
 *
 * Only matching complex single- and double-precision pairs are defined;
 * every other combination falls through to NotDefinedEntry and is treated
 * as unsupported by the dispatch-table builder.
 *
 * @tparam Tab Type of arrays containing input vectors A and B.
 * @tparam Tc Type of array containing output.
 */
template <typename Tab, typename Tc>
struct DotcTypePairSupportFactory
{
    static constexpr bool is_defined = std::disjunction<
        dpctl_td_ns::TypePairDefinedEntry<Tab,
                                          std::complex<float>,
                                          Tc,
                                          std::complex<float>>,
        dpctl_td_ns::TypePairDefinedEntry<Tab,
                                          std::complex<double>,
                                          Tc,
                                          std::complex<double>>,
        // fall-through
        dpctl_td_ns::NotDefinedEntry>::is_defined;
};

/**
* @brief A factory to define pairs of supported types for which
* MKL BLAS library provides support in oneapi::mkl::blas::dotu<Tab, Tc>
Expand Down
17 changes: 14 additions & 3 deletions dpnp/dpnp_iface.py
Original file line number Diff line number Diff line change
Expand Up @@ -205,7 +205,7 @@ def astype(x1, dtype, order="K", casting="unsafe", copy=True):
return dpnp_array._create_from_usm_ndarray(array_obj)


def check_supported_arrays_type(*arrays, scalar_type=False):
def check_supported_arrays_type(*arrays, scalar_type=False, all_scalars=False):
"""
Return ``True`` if each array has either type of scalar,
:class:`dpnp.ndarray` or :class:`dpctl.tensor.usm_ndarray`.
Expand All @@ -216,7 +216,9 @@ def check_supported_arrays_type(*arrays, scalar_type=False):
arrays : {dpnp_array, usm_ndarray}
Input arrays to check for supported types.
scalar_type : {bool}, optional
A scalar type is also considered as supported if flag is True.
A scalar type is also considered as supported if flag is ``True``.
all_scalars : {bool}, optional
All the input arrays can be scalar if flag is ``True``.

Returns
-------
Expand All @@ -231,13 +233,22 @@ def check_supported_arrays_type(*arrays, scalar_type=False):

"""

any_is_array = False
for a in arrays:
if scalar_type and dpnp.isscalar(a) or is_supported_array_type(a):
if is_supported_array_type(a):
any_is_array = True
continue
elif scalar_type and dpnp.isscalar(a):
continue

raise TypeError(
"An array must be any of supported type, but got {}".format(type(a))
)

if len(arrays) > 1 and not (all_scalars or any_is_array):
raise TypeError(
"At least one input must be of supported array type, but got all scalars."
)
return True


Expand Down
Loading
Loading