[webnn] Add float32 tests for WebNN hardSwish op #38712

Merged (2 commits) on Mar 1, 2023
10 changes: 10 additions & 0 deletions webnn/hard_swish.https.any.js
@@ -0,0 +1,10 @@
// META: title=test WebNN API hardSwish operation
// META: global=window,dedicatedworker
// META: script=./resources/utils.js
// META: timeout=long

'use strict';

// https://webmachinelearning.github.io/webnn/#api-mlgraphbuilder-hard-swish

testWebNNOperation('hardSwish', buildOperationWithSingleInput);
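For reference (not part of this PR), the WebNN hardSwish operation computes x * max(0, min(6, x + 3)) / 6 elementwise. A minimal sketch that can be used to sanity-check the expected float32 values in the test data below; hardSwishRef and its use of Math.fround are illustrative assumptions, not the reference used to generate the data:

// Sketch only: hardSwish(x) = x * max(0, min(6, x + 3)) / 6, rounded to float32.
const hardSwishRef = (x) => Math.fround(x * Math.min(Math.max(x + 3, 0), 6) / 6);

hardSwishRef(0.7341583533045579);  // ≈ 0.4569106, cf. the first expected value below
hardSwishRef(-6.445507690595167);  // -0: negative x times the zero clamp, cf. the signed-zero discussion below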
334 changes: 334 additions & 0 deletions webnn/resources/test_data/hard_swish.json
@@ -0,0 +1,334 @@
{
"tests": [
{
"name": "hardSwish float32 1D tensor",
"inputs": {
"x": {
"shape": [24],
"data": [
0.7341583533045579,
9.118859151005996,
3.545238531520827,
2.621943879280181,
-6.445507690595167,
-1.6835596550754381,
5.523179785756591,
-5.958856051028132,
-9.169189933081544,
6.420943542920213,
-3.293031330275471,
1.0410166785810624,
-7.246322671816956,
-0.9472730969847909,
-5.778352255817807,
3.185229125228698,
-7.261818072290236,
4.174602615173795,
3.7802628241590686,
-6.07124038718255,
-9.909919471919547,
-7.744259390113584,
-8.286120816748381,
8.083491160956697
],
"type": "float32"
}
},
"expected": {
"name": "output",
"shape": [24],
"data": [
0.4569105803966522,
9.11885929107666,
3.545238494873047,
2.4567370414733887,
0,
Contributor Author commented:
These hardSwish tests fail when run against the Chromium-integrated WebNN API with the XNNPACK backend, since the XNNPACK API distinguishes 0.0 from -0.0.
@fdwr @huningxin Any suggestions? Thanks.

@fdwr commented on Feb 27, 2023:
Signed and unsigned zero can be treated as equivalent (IEEE does: https://en.wikipedia.org/wiki/Signed_zero#Comparisons). I recommend that before checking for ULP differences in assert_array_approx_equals_ulp, you first check whether expected == actual, and only if they differ measure the ULP distance.

Contributor Author replied:
@fdwr Thanks for your suggestion! Yes, if the actual value is the same as the expected one, there is no need to measure the ULP distance, which also saves testing time. Please take another look, thanks.
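(Aside, not part of the review thread: in JavaScript, +0 and -0 compare equal with ===, yet their raw float32 bit patterns differ by 2^31, which is why a purely bitwise ULP comparison flags a -0.0 output. The float32Bits helper below is an illustration only, not the getBitwise() helper defined in utils.js.)

// Illustration: reinterpret a float32 value as its raw IEEE 754 bit pattern.
const float32Bits = (value) => new Uint32Array(new Float32Array([value]).buffer)[0];

0.0 === -0.0;       // true
float32Bits(0.0);   // 0x00000000
float32Bits(-0.0);  // 0x80000000 -- the bit patterns are 2**31 apart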

-0.3693843185901642,
5.52318000793457,
0,
0,
6.420943737030029,
0,
0.7011276483535767,
0,
-0.3240821659564972,
0,
3.1852290630340576,
0,
4.174602508544922,
3.7802627086639404,
0,
0,
0,
0,
8.083491325378418
],
"type": "float32"
}
},
{
"name": "hardSwish float32 2D tensor",
"inputs": {
"x": {
"shape": [4, 6],
"data": [
0.7341583533045579,
9.118859151005996,
3.545238531520827,
2.621943879280181,
-6.445507690595167,
-1.6835596550754381,
5.523179785756591,
-5.958856051028132,
-9.169189933081544,
6.420943542920213,
-3.293031330275471,
1.0410166785810624,
-7.246322671816956,
-0.9472730969847909,
-5.778352255817807,
3.185229125228698,
-7.261818072290236,
4.174602615173795,
3.7802628241590686,
-6.07124038718255,
-9.909919471919547,
-7.744259390113584,
-8.286120816748381,
8.083491160956697
],
"type": "float32"
}
},
"expected": {
"name": "output",
"shape": [4, 6],
"data": [
0.4569105803966522,
9.11885929107666,
3.545238494873047,
2.4567370414733887,
0,
-0.3693843185901642,
5.52318000793457,
0,
0,
6.420943737030029,
0,
0.7011276483535767,
0,
-0.3240821659564972,
0,
3.1852290630340576,
0,
4.174602508544922,
3.7802627086639404,
0,
0,
0,
0,
8.083491325378418
],
"type": "float32"
}
},
{
"name": "hardSwish float32 3D tensor",
"inputs": {
"x": {
"shape": [2, 3, 4],
"data": [
0.7341583533045579,
9.118859151005996,
3.545238531520827,
2.621943879280181,
-6.445507690595167,
-1.6835596550754381,
5.523179785756591,
-5.958856051028132,
-9.169189933081544,
6.420943542920213,
-3.293031330275471,
1.0410166785810624,
-7.246322671816956,
-0.9472730969847909,
-5.778352255817807,
3.185229125228698,
-7.261818072290236,
4.174602615173795,
3.7802628241590686,
-6.07124038718255,
-9.909919471919547,
-7.744259390113584,
-8.286120816748381,
8.083491160956697
],
"type": "float32"
}
},
"expected": {
"name": "output",
"shape": [2, 3, 4],
"data": [
0.4569105803966522,
9.11885929107666,
3.545238494873047,
2.4567370414733887,
0,
-0.3693843185901642,
5.52318000793457,
0,
0,
6.420943737030029,
0,
0.7011276483535767,
0,
-0.3240821659564972,
0,
3.1852290630340576,
0,
4.174602508544922,
3.7802627086639404,
0,
0,
0,
0,
8.083491325378418
],
"type": "float32"
}
},
{
"name": "hardSwish float32 4D tensor",
"inputs": {
"x": {
"shape": [2, 2, 2, 3],
"data": [
0.7341583533045579,
9.118859151005996,
3.545238531520827,
2.621943879280181,
-6.445507690595167,
-1.6835596550754381,
5.523179785756591,
-5.958856051028132,
-9.169189933081544,
6.420943542920213,
-3.293031330275471,
1.0410166785810624,
-7.246322671816956,
-0.9472730969847909,
-5.778352255817807,
3.185229125228698,
-7.261818072290236,
4.174602615173795,
3.7802628241590686,
-6.07124038718255,
-9.909919471919547,
-7.744259390113584,
-8.286120816748381,
8.083491160956697
],
"type": "float32"
}
},
"expected": {
"name": "output",
"shape": [2, 2, 2, 3],
"data": [
0.4569105803966522,
9.11885929107666,
3.545238494873047,
2.4567370414733887,
0,
-0.3693843185901642,
5.52318000793457,
0,
0,
6.420943737030029,
0,
0.7011276483535767,
0,
-0.3240821659564972,
0,
3.1852290630340576,
0,
4.174602508544922,
3.7802627086639404,
0,
0,
0,
0,
8.083491325378418
],
"type": "float32"
}
},
{
"name": "hardSwish float32 5D tensor",
"inputs": {
"x": {
"shape": [2, 1, 4, 1, 3],
"data": [
0.7341583533045579,
9.118859151005996,
3.545238531520827,
2.621943879280181,
-6.445507690595167,
-1.6835596550754381,
5.523179785756591,
-5.958856051028132,
-9.169189933081544,
6.420943542920213,
-3.293031330275471,
1.0410166785810624,
-7.246322671816956,
-0.9472730969847909,
-5.778352255817807,
3.185229125228698,
-7.261818072290236,
4.174602615173795,
3.7802628241590686,
-6.07124038718255,
-9.909919471919547,
-7.744259390113584,
-8.286120816748381,
8.083491160956697
],
"type": "float32"
}
},
"expected": {
"name": "output",
"shape": [2, 1, 4, 1, 3],
"data": [
0.4569105803966522,
9.11885929107666,
3.545238494873047,
2.4567370414733887,
0,
-0.3693843185901642,
5.52318000793457,
0,
0,
6.420943737030029,
0,
0.7011276483535767,
0,
-0.3240821659564972,
0,
3.1852290630340576,
0,
4.174602508544922,
3.7802627086639404,
0,
0,
0,
0,
8.083491325378418
],
"type": "float32"
}
}
]
}
18 changes: 12 additions & 6 deletions webnn/resources/utils.js
@@ -268,6 +268,7 @@ const PrecisionMetrics = {
   sin: {ATOL: {float32: 1/1024, float16: 1/512}},
   tan: {ATOL: {float32: 1/1024, float16: 1/512}},
   // End Element-wise unary operations
+  hardSwish: {ULP: {float32: 4, float16: 4}},
   gemm: {ULP: {float32: getGemmPrecisionTolerance, float16: getGemmPrecisionTolerance}},
   leakyRelu: {ULP: {float32: 1, float16: 1}},
   matmul: {ULP: {float32: getMatmulPrecisionTolerance, float16: getMatmulPrecisionTolerance}},
@@ -353,12 +354,17 @@ const assert_array_approx_equals_ulp = (actual, expected, nulp, dataType, descri
       `assert_array_approx_equals_ulp: ${description} lengths differ, expected ${expected.length} but got ${actual.length}`);
   let actualBitwise, expectedBitwise, distance;
   for (let i = 0; i < actual.length; i++) {
-    actualBitwise = getBitwise(actual[i], dataType);
-    expectedBitwise = getBitwise(expected[i], dataType);
-    distance = actualBitwise - expectedBitwise;
-    distance = distance >= 0 ? distance : -distance;
-    assert_true(distance <= nulp,
-        `assert_array_approx_equals_ulp: ${description} actual ${actual[i]} should be close enough to expected ${expected[i]} by the acceptable ${nulp} ULP distance, but they have ${distance} ULP distance`);
+    if (actual[i] === expected[i]) {
+      continue;
+    } else {
+      // measure the ULP distance
+      actualBitwise = getBitwise(actual[i], dataType);
+      expectedBitwise = getBitwise(expected[i], dataType);
+      distance = actualBitwise - expectedBitwise;
+      distance = distance >= 0 ? distance : -distance;
+      assert_true(distance <= nulp,
+          `assert_array_approx_equals_ulp: ${description} actual ${actual[i]} should be close enough to expected ${expected[i]} by the acceptable ${nulp} ULP distance, but they have ${distance} ULP distance`);
+    }
   }
 };
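Below is a hedged usage sketch of the updated helper (the arrays and the call are illustrative, not taken from the test suite); hardSwish's float32 tolerance in PrecisionMetrics above is 4 ULP:

// A backend that emits -0.0 where the test data expects 0 now passes:
const actual   = new Float32Array([-0.0, 3.545238494873047]);
const expected = new Float32Array([ 0.0, 3.545238494873047]);

// -0.0 === 0.0 and the second pair is bit-identical, so both elements take the
// early-equality branch and no bitwise ULP distance needs to be measured.
assert_array_approx_equals_ulp(actual, expected, 4, 'float32', 'hardSwish output');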
