diff --git a/cmake/CMakeLists.txt b/cmake/CMakeLists.txt
index fb6bf16644de6..acbde7f56a8d5 100644
--- a/cmake/CMakeLists.txt
+++ b/cmake/CMakeLists.txt
@@ -175,6 +175,8 @@ option(onnxruntime_PREBUILT_PYTORCH_PATH "Path to pytorch installation dir")
 # external transformer src path
 option(onnxruntime_EXTERNAL_TRANSFORMER_SRC_PATH "Path to external transformer src dir")
 
+option(onnxruntime_ENABLE_CUDA_PROFILING "Enable CUDA kernel profiling" OFF)
+
 if (onnxruntime_USE_CUDA)
   set(onnxruntime_DISABLE_RTTI OFF)
 endif()
@@ -960,7 +962,11 @@ if (WIN32)
         # issued by thrust nonstandard extension used: nameless struct/union
         list(APPEND ORT_WARNING_FLAGS "/wd4201")
         # warning C4800: Implicit conversion from 'X' to bool. Possible information loss
-        list(APPEND ORT_WARNING_FLAGS "/w34800")
+        if (onnxruntime_USE_OPENVINO)
+           list(APPEND ORT_WARNING_FLAGS "/wd4800")
+        else()
+           list(APPEND ORT_WARNING_FLAGS "/w34800")
+        endif()
         if (onnxruntime_USE_OPENMP)
             list(APPEND ORT_WARNING_FLAGS "/wd6993") # Code analysis ignores OpenMP constructs
         endif()
@@ -1696,6 +1702,10 @@ if (onnxruntime_ENABLE_TRAINING_OPS)
   add_compile_definitions(ENABLE_TRAINING_OPS)
 endif()
 
+if (onnxruntime_ENABLE_CUDA_PROFILING)
+  add_compile_definitions(ENABLE_CUDA_PROFILING)
+endif()
+
 if (onnxruntime_ENABLE_TRAINING)
   add_compile_definitions(ENABLE_TRAINING)
   add_compile_definitions(ENABLE_TRAINING_OPS)
diff --git a/cmake/onnxruntime_providers.cmake b/cmake/onnxruntime_providers.cmake
index adf121a196a9a..a82629bed4c66 100644
--- a/cmake/onnxruntime_providers.cmake
+++ b/cmake/onnxruntime_providers.cmake
@@ -353,13 +353,18 @@ if (onnxruntime_USE_CUDA)
   endif()
 
   add_dependencies(onnxruntime_providers_cuda onnxruntime_providers_shared ${onnxruntime_EXTERNAL_DEPENDENCIES} ${onnxruntime_tvm_dependencies})
-  target_link_directories(onnxruntime_providers_cuda PRIVATE ${onnxruntime_CUDA_HOME}/extras/CUPTI/lib64)
-  target_link_libraries(onnxruntime_providers_cuda PRIVATE cublas cudnn curand cufft cupti ${ONNXRUNTIME_PROVIDERS_SHARED})
-  target_include_directories(onnxruntime_providers_cuda PRIVATE ${ONNXRUNTIME_ROOT} ${CMAKE_CURRENT_BINARY_DIR} ${onnxruntime_CUDNN_HOME}/include ${eigen_INCLUDE_DIRS} ${TVM_INCLUDES} PUBLIC ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES} ${onnxruntime_CUDA_HOME}/extras/CUPTI/include)
+  target_link_libraries(onnxruntime_providers_cuda PRIVATE cublas cudnn curand cufft ${ONNXRUNTIME_PROVIDERS_SHARED})
+  target_include_directories(onnxruntime_providers_cuda PRIVATE ${ONNXRUNTIME_ROOT} ${CMAKE_CURRENT_BINARY_DIR} ${onnxruntime_CUDNN_HOME}/include ${eigen_INCLUDE_DIRS} ${TVM_INCLUDES} PUBLIC ${CMAKE_CUDA_TOOLKIT_INCLUDE_DIRECTORIES})
   # ${CMAKE_CURRENT_BINARY_DIR} is so that #include "onnxruntime_config.h" inside tensor_shape.h is found
   set_target_properties(onnxruntime_providers_cuda PROPERTIES LINKER_LANGUAGE CUDA)
   set_target_properties(onnxruntime_providers_cuda PROPERTIES FOLDER "ONNXRuntime")
 
+  if (onnxruntime_ENABLE_CUDA_PROFILING) # Configure CUPTI for CUDA profiling
+    target_include_directories(onnxruntime_providers_cuda PRIVATE ${onnxruntime_CUDA_HOME}/extras/CUPTI/include)
+    target_link_directories(onnxruntime_providers_cuda PRIVATE ${onnxruntime_CUDA_HOME}/extras/CUPTI/lib64)
+    target_link_libraries(onnxruntime_providers_cuda PRIVATE cupti)
+  endif()
+
   if (onnxruntime_ENABLE_NVTX_PROFILE)
     target_link_libraries(onnxruntime_providers_cuda PRIVATE nvToolsExt)
   endif()
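Note: with this change, CUPTI headers and libraries are required only when the build is configured with -Donnxruntime_ENABLE_CUDA_PROFILING=ON. A minimal sketch of the guard pattern this enables in C++ sources (mirroring the cuda_profiler.h hunk further down; the profiler body is elided):

```cpp
// Compiles only when CMake added the ENABLE_CUDA_PROFILING definition
// (see the CMakeLists.txt hunk above); default builds need no CUPTI.
#if defined(USE_CUDA) && defined(ENABLE_CUDA_PROFILING)
#include <cupti.h>
// ... CUPTI-based kernel profiler implementation ...
#endif
```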
diff --git a/dockerfiles/Dockerfile.openvino b/dockerfiles/Dockerfile.openvino
index b449ea1194465..18d40bd78eb2f 100644
--- a/dockerfiles/Dockerfile.openvino
+++ b/dockerfiles/Dockerfile.openvino
@@ -3,7 +3,7 @@
 # SPDX-License-Identifier: MIT
 #--------------------------------------------------------------------------
 
-ARG OPENVINO_VERSION=2021.4.1
+ARG OPENVINO_VERSION=2021.4.2
 
 
 # Build stage
diff --git a/dockerfiles/Dockerfile.openvino-centos7 b/dockerfiles/Dockerfile.openvino-centos7
index af14da9e64eb3..e500f3ce5a06a 100755
--- a/dockerfiles/Dockerfile.openvino-centos7
+++ b/dockerfiles/Dockerfile.openvino-centos7
@@ -8,12 +8,12 @@ FROM centos:7.8.2003
 WORKDIR /code
 
 ARG MY_ROOT=/code
-ARG YUM_OV_PACKAGE=intel-openvino-runtime-centos7-2021.4.689.x86_64
+ARG YUM_OV_PACKAGE=intel-openvino-runtime-centos7-2021.4.752.x86_64
 ARG DEVICE=CPU_FP32
 ARG ONNXRUNTIME_REPO=https://github.com/microsoft/onnxruntime
 ARG ONNXRUNTIME_BRANCH=master
 
-ENV INTEL_OPENVINO_DIR=/opt/intel/openvino_2021.4.689
+ENV INTEL_OPENVINO_DIR=/opt/intel/openvino_2021.4.752
 ENV InferenceEngine_DIR=${INTEL_OPENVINO_DIR}/deployment_tools/inference_engine/share
 ENV IE_PLUGINS_PATH=${INTEL_OPENVINO_DIR}/deployment_tools/inference_engine/lib/intel64
 ENV ngraph_DIR=${INTEL_OPENVINO_DIR}/deployment_tools/ngraph/cmake
@@ -58,7 +58,7 @@ RUN yum update -y && \
     yum update -y && yum list intel-openvino* && \
     yum install -y $YUM_OV_PACKAGE && \
     cd ${INTEL_OPENVINO_DIR}/install_dependencies/ && ./install_openvino_dependencies.sh -y && \
-    printf "\nexport LD_LIBRARY_PATH=\${LD_LIBRARY_PATH}:/usr/local/lib\n" >> /opt/intel/openvino_2021.4.689/bin/setupvars.sh && \
+    printf "\nexport LD_LIBRARY_PATH=\${LD_LIBRARY_PATH}:/usr/local/lib\n" >> /opt/intel/openvino_2021.4.752/bin/setupvars.sh && \
     cd /opt/libusb-1.0.22 && \
     /usr/bin/install -c -m 644 libusb-1.0.pc '/usr/local/lib/pkgconfig' && \
     cp /opt/intel/openvino_2021/deployment_tools/inference_engine/external/97-myriad-usbboot.rules /etc/udev/rules.d/ && \
diff --git a/dockerfiles/Dockerfile.openvino-csharp b/dockerfiles/Dockerfile.openvino-csharp
index eec0d0934cd4b..961e3b30f6cec 100644
--- a/dockerfiles/Dockerfile.openvino-csharp
+++ b/dockerfiles/Dockerfile.openvino-csharp
@@ -15,7 +15,7 @@ ARG MY_ROOT=/code
 ENV PATH /opt/miniconda/bin:/code/cmake-3.21.0-linux-x86_64/bin:$PATH
 ENV LD_LIBRARY_PATH=/opt/miniconda/lib:/usr/lib:/usr/lib/x86_64-linux-gnu:$LD_LIBRARY_PATH
 
-ENV INTEL_OPENVINO_DIR=/opt/intel/openvino_2021.4.689
+ENV INTEL_OPENVINO_DIR=/opt/intel/openvino_2021.4.752
 ENV InferenceEngine_DIR=${INTEL_OPENVINO_DIR}/deployment_tools/inference_engine/share
 ENV IE_PLUGINS_PATH=${INTEL_OPENVINO_DIR}/deployment_tools/inference_engine/lib/intel64
 ENV LD_LIBRARY_PATH=/opt/intel/opencl:${INTEL_OPENVINO_DIR}/inference_engine/external/gna/lib:${INTEL_OPENVINO_DIR}/deployment_tools/inference_engine/external/mkltiny_lnx/lib:$INTEL_OPENVINO_DIR/deployment_tools/ngraph/lib:${INTEL_OPENVINO_DIR}/deployment_tools/inference_engine/external/omp/lib:${INTEL_OPENVINO_DIR}/deployment_tools/inference_engine/external/tbb/lib:${IE_PLUGINS_PATH}:${LD_LIBRARY_PATH}
@@ -54,7 +54,7 @@ RUN apt update -y && \
     cd /etc/apt/sources.list.d && \
     echo "deb https://apt.repos.intel.com/openvino/2021 all main">intel-openvino-2021.list && \ 
     apt update -y && \
-    apt -y install intel-openvino-dev-ubuntu18-2021.4.689 && \
+    apt -y install intel-openvino-dev-ubuntu18-2021.4.752 && \
     cd ${INTEL_OPENVINO_DIR}/install_dependencies && ./install_openvino_dependencies.sh -y && \
     cd ${INTEL_OPENVINO_DIR} && rm -rf documentation data_processing && \
     cd deployment_tools/ && rm -rf model_optimizer open_model_zoo demo tools && \
@@ -82,7 +82,7 @@ RUN apt update -y && \
     cd ${MY_ROOT} && \
     apt install -y gnupg ca-certificates && \
     #apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 3FA7E0328081BFF6A14DA29AA6A19B38D3D831EF && \
-    curl http://download.mono-project.com/repo/xamarin.gpg | apt-key add - && \
+    curl https://download.mono-project.com/repo/xamarin.gpg | apt-key add - && \
     echo "deb https://download.mono-project.com/repo/ubuntu stable-bionic main" | sudo tee /etc/apt/sources.list.d/mono-official-stable.list && \
     apt update -y && \
     apt install -y mono-devel && \
@@ -97,13 +97,14 @@ RUN apt update -y && \
     apt-get update -y &&\
     apt-get install -y apt-transport-https && \
     apt-get update -y && \
-    apt-get install -y dotnet-sdk-3.1 && \
+    apt-get install -y dotnet-sdk-5.0 && \
 # Download and build ONNX Runtime
     cd ${MY_ROOT} && \
     git clone --recursive -b ${ONNXRUNTIME_BRANCH} ${ONNXRUNTIME_REPO} && \
     /bin/sh onnxruntime/dockerfiles/scripts/install_common_deps.sh && \
     pip install onnx==1.9 && \
     cd ${MY_ROOT}/onnxruntime && ./build.sh --config Release --update --build --parallel --use_openvino ${DEVICE} --build_nuget --build_shared_lib && \
+    cp ${MY_ROOT}/onnxruntime/build/Linux/Release/Microsoft.ML.OnnxRuntime.Managed* ${MY_ROOT}/onnxruntime/build/Linux/Release/nuget-artifacts && \
     mv ${MY_ROOT}/onnxruntime/build/Linux/Release/nuget-artifacts ${MY_ROOT} && \
 # Clean-up unnecessary files
     rm -rf ${MY_ROOT}/cmake* /opt/cmake ${MY_ROOT}/onnxruntime && \
diff --git a/js/node/package-lock.json b/js/node/package-lock.json
index c7b67aac3a996..8024c64131416 100644
--- a/js/node/package-lock.json
+++ b/js/node/package-lock.json
@@ -107,18 +107,6 @@
       "integrity": "sha512-sL/cEvJWAnClXw0wHk85/2L0G6Sj8UB0Ctc1TEMbKSsmpRosqhwj9gWgFRZSrBr2f9tiXISwNhCPmlfqUqyb9Q==",
       "dev": true
     },
-    "ajv": {
-      "version": "6.12.6",
-      "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
-      "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
-      "dev": true,
-      "requires": {
-        "fast-deep-equal": "^3.1.1",
-        "fast-json-stable-stringify": "^2.0.0",
-        "json-schema-traverse": "^0.4.1",
-        "uri-js": "^4.2.2"
-      }
-    },
     "ansi": {
       "version": "0.3.1",
       "resolved": "https://registry.npmjs.org/ansi/-/ansi-0.3.1.tgz",
@@ -156,31 +144,52 @@
         "picomatch": "^2.0.4"
       }
     },
-    "argparse": {
-      "version": "2.0.1",
-      "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
-      "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
-      "dev": true
-    },
-    "asn1": {
-      "version": "0.2.4",
-      "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz",
-      "integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==",
+    "are-we-there-yet": {
+      "version": "1.0.6",
+      "resolved": "https://registry.npmjs.org/are-we-there-yet/-/are-we-there-yet-1.0.6.tgz",
+      "integrity": "sha1-otKMkxAqpsyWJFomy5VN4G7FPww=",
       "dev": true,
       "requires": {
-        "safer-buffer": "~2.1.0"
+        "delegates": "^1.0.0",
+        "readable-stream": "^2.0.0 || ^1.1.13"
+      },
+      "dependencies": {
+        "isarray": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
+          "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=",
+          "dev": true
+        },
+        "readable-stream": {
+          "version": "2.3.7",
+          "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz",
+          "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==",
+          "dev": true,
+          "requires": {
+            "core-util-is": "~1.0.0",
+            "inherits": "~2.0.3",
+            "isarray": "~1.0.0",
+            "process-nextick-args": "~2.0.0",
+            "safe-buffer": "~5.1.1",
+            "string_decoder": "~1.1.1",
+            "util-deprecate": "~1.0.1"
+          }
+        },
+        "string_decoder": {
+          "version": "1.1.1",
+          "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
+          "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
+          "dev": true,
+          "requires": {
+            "safe-buffer": "~5.1.0"
+          }
+        }
       }
     },
-    "assert-plus": {
-      "version": "1.0.0",
-      "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz",
-      "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=",
-      "dev": true
-    },
-    "asynckit": {
-      "version": "0.4.0",
-      "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz",
-      "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=",
+    "argparse": {
+      "version": "2.0.1",
+      "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz",
+      "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==",
       "dev": true
     },
     "at-least-node": {
@@ -189,17 +198,14 @@
       "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==",
       "dev": true
     },
-    "aws-sign2": {
-      "version": "0.7.0",
-      "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz",
-      "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=",
-      "dev": true
-    },
-    "aws4": {
-      "version": "1.11.0",
-      "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.11.0.tgz",
-      "integrity": "sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA==",
-      "dev": true
+    "axios": {
+      "version": "0.21.4",
+      "resolved": "https://registry.npmjs.org/axios/-/axios-0.21.4.tgz",
+      "integrity": "sha512-ut5vewkiu8jjGBdqpM44XxjuCjq9LAKeHVmoVfHVzy8eHgxxq8SbAVQNovDA8mVi05kP0Ea/n/UzcSHcTJQfNg==",
+      "dev": true,
+      "requires": {
+        "follow-redirects": "^1.14.0"
+      }
     },
     "balanced-match": {
       "version": "1.0.2",
@@ -207,19 +213,10 @@
       "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==",
       "dev": true
     },
-    "bcrypt-pbkdf": {
-      "version": "1.0.2",
-      "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz",
-      "integrity": "sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=",
-      "dev": true,
-      "requires": {
-        "tweetnacl": "^0.14.3"
-      }
-    },
     "big-integer": {
-      "version": "1.6.48",
-      "resolved": "https://registry.npmjs.org/big-integer/-/big-integer-1.6.48.tgz",
-      "integrity": "sha512-j51egjPa7/i+RdiRuJbPdJ2FIUYYPhvYLjzoYbcMMm62ooO6F94fETG4MTs46zPAF9Brs04OajboA/qTGuz78w==",
+      "version": "1.6.51",
+      "resolved": "https://registry.npmjs.org/big-integer/-/big-integer-1.6.51.tgz",
+      "integrity": "sha512-GPEid2Y9QU1Exl1rpO9B2IPJGHPSupF5GnVIP0blYvNOMer2bTvSWs1jGOUg04hTmu67nmLsQ9TBo1puaotBHg==",
       "dev": true
     },
     "binary": {
@@ -293,12 +290,6 @@
       "integrity": "sha1-fB0W1nmhu+WcoCys7PsBHiAfWh8=",
       "dev": true
     },
-    "caseless": {
-      "version": "0.12.0",
-      "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz",
-      "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=",
-      "dev": true
-    },
     "chainsaw": {
       "version": "0.1.0",
       "resolved": "https://registry.npmjs.org/chainsaw/-/chainsaw-0.1.0.tgz",
@@ -363,11 +354,12 @@
       }
     },
     "cmake-js": {
-      "version": "6.1.0",
-      "resolved": "https://registry.npmjs.org/cmake-js/-/cmake-js-6.1.0.tgz",
-      "integrity": "sha512-utmukLQftpgrCpGRCaHnkv4K27HZNNFqmBl4vnvccy0xp4c1erxjFU/Lq4wn5ngAhFZmpwBPQfoKWKThjSBiwg==",
+      "version": "6.2.1",
+      "resolved": "https://registry.npmjs.org/cmake-js/-/cmake-js-6.2.1.tgz",
+      "integrity": "sha512-wEpg0Z8SY6ihXTe+xosadh4PbASdWSM/locbLacWRYJCZfAjWLyOrd4RoVIeirLkfPxmG8GdNQA9tW/Rz5SfJA==",
       "dev": true,
       "requires": {
+        "axios": "^0.21.1",
         "debug": "^4",
         "fs-extra": "^5.0.0",
         "is-iojs": "^1.0.1",
@@ -375,7 +367,6 @@
         "memory-stream": "0",
         "npmlog": "^1.2.0",
         "rc": "^1.2.7",
-        "request": "^2.54.0",
         "semver": "^5.0.3",
         "splitargs": "0",
         "tar": "^4",
@@ -385,16 +376,6 @@
         "yargs": "^3.6.0"
       },
       "dependencies": {
-        "are-we-there-yet": {
-          "version": "1.0.6",
-          "resolved": "https://registry.npmjs.org/are-we-there-yet/-/are-we-there-yet-1.0.6.tgz",
-          "integrity": "sha1-otKMkxAqpsyWJFomy5VN4G7FPww=",
-          "dev": true,
-          "requires": {
-            "delegates": "^1.0.0",
-            "readable-stream": "^2.0.0 || ^1.1.13"
-          }
-        },
         "fs-extra": {
           "version": "5.0.0",
           "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-5.0.0.tgz",
@@ -405,30 +386,6 @@
             "jsonfile": "^4.0.0",
             "universalify": "^0.1.0"
           }
-        },
-        "gauge": {
-          "version": "1.2.7",
-          "resolved": "https://registry.npmjs.org/gauge/-/gauge-1.2.7.tgz",
-          "integrity": "sha1-6c7FSD09TuDvRLYKfZnkk14TbZM=",
-          "dev": true,
-          "requires": {
-            "ansi": "^0.3.0",
-            "has-unicode": "^2.0.0",
-            "lodash.pad": "^4.1.0",
-            "lodash.padend": "^4.1.0",
-            "lodash.padstart": "^4.1.0"
-          }
-        },
-        "npmlog": {
-          "version": "1.2.1",
-          "resolved": "https://registry.npmjs.org/npmlog/-/npmlog-1.2.1.tgz",
-          "integrity": "sha1-KOe+YZYJtT960d0wChDWTXFiaLY=",
-          "dev": true,
-          "requires": {
-            "ansi": "~0.3.0",
-            "are-we-there-yet": "~1.0.0",
-            "gauge": "~1.2.0"
-          }
         }
       }
     },
@@ -453,15 +410,6 @@
       "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
       "dev": true
     },
-    "combined-stream": {
-      "version": "1.0.8",
-      "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz",
-      "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==",
-      "dev": true,
-      "requires": {
-        "delayed-stream": "~1.0.0"
-      }
-    },
     "concat-map": {
       "version": "0.0.1",
       "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz",
@@ -469,20 +417,11 @@
       "dev": true
     },
     "core-util-is": {
-      "version": "1.0.2",
-      "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz",
-      "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=",
+      "version": "1.0.3",
+      "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.3.tgz",
+      "integrity": "sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==",
       "dev": true
     },
-    "dashdash": {
-      "version": "1.14.1",
-      "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz",
-      "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=",
-      "dev": true,
-      "requires": {
-        "assert-plus": "^1.0.0"
-      }
-    },
     "debug": {
       "version": "4.3.1",
       "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.1.tgz",
@@ -504,12 +443,6 @@
       "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==",
       "dev": true
     },
-    "delayed-stream": {
-      "version": "1.0.0",
-      "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz",
-      "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=",
-      "dev": true
-    },
     "delegates": {
       "version": "1.0.0",
       "resolved": "https://registry.npmjs.org/delegates/-/delegates-1.0.0.tgz",
@@ -529,16 +462,38 @@
       "dev": true,
       "requires": {
         "readable-stream": "^2.0.2"
-      }
-    },
-    "ecc-jsbn": {
-      "version": "0.1.2",
-      "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz",
-      "integrity": "sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=",
-      "dev": true,
-      "requires": {
-        "jsbn": "~0.1.0",
-        "safer-buffer": "^2.1.0"
+      },
+      "dependencies": {
+        "isarray": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
+          "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=",
+          "dev": true
+        },
+        "readable-stream": {
+          "version": "2.3.7",
+          "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz",
+          "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==",
+          "dev": true,
+          "requires": {
+            "core-util-is": "~1.0.0",
+            "inherits": "~2.0.3",
+            "isarray": "~1.0.0",
+            "process-nextick-args": "~2.0.0",
+            "safe-buffer": "~5.1.1",
+            "string_decoder": "~1.1.1",
+            "util-deprecate": "~1.0.1"
+          }
+        },
+        "string_decoder": {
+          "version": "1.1.1",
+          "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
+          "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
+          "dev": true,
+          "requires": {
+            "safe-buffer": "~5.1.0"
+          }
+        }
       }
     },
     "emoji-regex": {
@@ -568,30 +523,6 @@
       "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==",
       "dev": true
     },
-    "extend": {
-      "version": "3.0.2",
-      "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz",
-      "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==",
-      "dev": true
-    },
-    "extsprintf": {
-      "version": "1.3.0",
-      "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz",
-      "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=",
-      "dev": true
-    },
-    "fast-deep-equal": {
-      "version": "3.1.3",
-      "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz",
-      "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==",
-      "dev": true
-    },
-    "fast-json-stable-stringify": {
-      "version": "2.1.0",
-      "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz",
-      "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==",
-      "dev": true
-    },
     "fast-safe-stringify": {
       "version": "2.0.7",
       "resolved": "https://registry.npmjs.org/fast-safe-stringify/-/fast-safe-stringify-2.0.7.tgz",
@@ -623,23 +554,12 @@
       "integrity": "sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==",
       "dev": true
     },
-    "forever-agent": {
-      "version": "0.6.1",
-      "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz",
-      "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=",
+    "follow-redirects": {
+      "version": "1.14.5",
+      "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.14.5.tgz",
+      "integrity": "sha512-wtphSXy7d4/OR+MvIFbCVBDzZ5520qV8XfPklSN5QtxuMUJZ+b0Wnst1e1lCDocfzuCkHqj8k0FpZqO+UIaKNA==",
       "dev": true
     },
-    "form-data": {
-      "version": "2.3.3",
-      "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz",
-      "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==",
-      "dev": true,
-      "requires": {
-        "asynckit": "^0.4.0",
-        "combined-stream": "^1.0.6",
-        "mime-types": "^2.1.12"
-      }
-    },
     "fs-extra": {
       "version": "9.1.0",
       "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz",
@@ -704,21 +624,25 @@
         "rimraf": "2"
       }
     },
+    "gauge": {
+      "version": "1.2.7",
+      "resolved": "https://registry.npmjs.org/gauge/-/gauge-1.2.7.tgz",
+      "integrity": "sha1-6c7FSD09TuDvRLYKfZnkk14TbZM=",
+      "dev": true,
+      "requires": {
+        "ansi": "^0.3.0",
+        "has-unicode": "^2.0.0",
+        "lodash.pad": "^4.1.0",
+        "lodash.padend": "^4.1.0",
+        "lodash.padstart": "^4.1.0"
+      }
+    },
     "get-caller-file": {
       "version": "2.0.5",
       "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz",
       "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==",
       "dev": true
     },
-    "getpass": {
-      "version": "0.1.7",
-      "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz",
-      "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=",
-      "dev": true,
-      "requires": {
-        "assert-plus": "^1.0.0"
-      }
-    },
     "glob": {
       "version": "7.1.6",
       "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz",
@@ -754,22 +678,6 @@
       "integrity": "sha512-qBr4OuELkhPenW6goKVXiv47US3clb3/IbuWF9KNKEijAy9oeHxU9IgzjvJhHkUzhaj7rOUD7+YGWqUjLp5oSA==",
       "dev": true
     },
-    "har-schema": {
-      "version": "2.0.0",
-      "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz",
-      "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=",
-      "dev": true
-    },
-    "har-validator": {
-      "version": "5.1.5",
-      "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz",
-      "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==",
-      "dev": true,
-      "requires": {
-        "ajv": "^6.12.3",
-        "har-schema": "^2.0.0"
-      }
-    },
     "has-flag": {
       "version": "4.0.0",
       "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz",
@@ -788,17 +696,6 @@
       "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==",
       "dev": true
     },
-    "http-signature": {
-      "version": "1.2.0",
-      "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz",
-      "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=",
-      "dev": true,
-      "requires": {
-        "assert-plus": "^1.0.0",
-        "jsprim": "^1.2.2",
-        "sshpk": "^1.7.0"
-      }
-    },
     "inflight": {
       "version": "1.0.6",
       "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz",
@@ -884,16 +781,10 @@
       "integrity": "sha512-YWnfyRwxL/+SsrWYfOpUtz5b3YD+nyfkHvjbcanzk8zgyO4ASD67uVMRt8k5bM4lLMDnXfriRhOpemw+NfT1eA==",
       "dev": true
     },
-    "is-typedarray": {
-      "version": "1.0.0",
-      "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz",
-      "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=",
-      "dev": true
-    },
     "isarray": {
-      "version": "1.0.0",
-      "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
-      "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=",
+      "version": "0.0.1",
+      "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz",
+      "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=",
       "dev": true
     },
     "isexe": {
@@ -902,12 +793,6 @@
       "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=",
       "dev": true
     },
-    "isstream": {
-      "version": "0.1.2",
-      "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz",
-      "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=",
-      "dev": true
-    },
     "js-yaml": {
       "version": "4.0.0",
       "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.0.0.tgz",
@@ -917,36 +802,12 @@
         "argparse": "^2.0.1"
       }
     },
-    "jsbn": {
-      "version": "0.1.1",
-      "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz",
-      "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=",
-      "dev": true
-    },
     "json-parse-better-errors": {
       "version": "1.0.2",
       "resolved": "https://registry.npmjs.org/json-parse-better-errors/-/json-parse-better-errors-1.0.2.tgz",
       "integrity": "sha512-mrqyZKfX5EhL7hvqcV6WG1yYjnjeuYDzDhhcAAUrq8Po85NBQBJP+ZDUT75qZQ98IkUoBqdkExkukOU7Ts2wrw==",
       "dev": true
     },
-    "json-schema": {
-      "version": "0.2.3",
-      "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz",
-      "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=",
-      "dev": true
-    },
-    "json-schema-traverse": {
-      "version": "0.4.1",
-      "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
-      "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==",
-      "dev": true
-    },
-    "json-stringify-safe": {
-      "version": "5.0.1",
-      "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz",
-      "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=",
-      "dev": true
-    },
     "jsonc": {
       "version": "2.0.0",
       "resolved": "https://registry.npmjs.org/jsonc/-/jsonc-2.0.0.tgz",
@@ -978,18 +839,6 @@
         "graceful-fs": "^4.1.6"
       }
     },
-    "jsprim": {
-      "version": "1.4.1",
-      "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz",
-      "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=",
-      "dev": true,
-      "requires": {
-        "assert-plus": "1.0.0",
-        "extsprintf": "1.3.0",
-        "json-schema": "0.2.3",
-        "verror": "1.10.0"
-      }
-    },
     "lcid": {
       "version": "1.0.0",
       "resolved": "https://registry.npmjs.org/lcid/-/lcid-1.0.0.tgz",
@@ -1060,47 +909,6 @@
       "dev": true,
       "requires": {
         "readable-stream": "~1.0.26-2"
-      },
-      "dependencies": {
-        "isarray": {
-          "version": "0.0.1",
-          "resolved": "https://registry.npmjs.org/isarray/-/isarray-0.0.1.tgz",
-          "integrity": "sha1-ihis/Kmo9Bd+Cav8YDiTmwXR7t8=",
-          "dev": true
-        },
-        "readable-stream": {
-          "version": "1.0.34",
-          "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.0.34.tgz",
-          "integrity": "sha1-Elgg40vIQtLyqq+v5MKRbuMsFXw=",
-          "dev": true,
-          "requires": {
-            "core-util-is": "~1.0.0",
-            "inherits": "~2.0.1",
-            "isarray": "0.0.1",
-            "string_decoder": "~0.10.x"
-          }
-        },
-        "string_decoder": {
-          "version": "0.10.31",
-          "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz",
-          "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=",
-          "dev": true
-        }
-      }
-    },
-    "mime-db": {
-      "version": "1.47.0",
-      "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.47.0.tgz",
-      "integrity": "sha512-QBmA/G2y+IfeS4oktet3qRZ+P5kPhCKRXxXnQEudYqUaEioAU1/Lq2us3D/t1Jfo4hE9REQPrbB7K5sOczJVIw==",
-      "dev": true
-    },
-    "mime-types": {
-      "version": "2.1.30",
-      "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.30.tgz",
-      "integrity": "sha512-crmjA4bLtR8m9qLpHvgxSChT+XoSlZi8J4n/aIdn3z92e/U47Z0V/yl+Wh9W046GgFVAmoNR/fmdbZYcSSIUeg==",
-      "dev": true,
-      "requires": {
-        "mime-db": "1.47.0"
       }
     },
     "minimatch": {
@@ -1300,18 +1108,23 @@
       "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==",
       "dev": true
     },
+    "npmlog": {
+      "version": "1.2.1",
+      "resolved": "https://registry.npmjs.org/npmlog/-/npmlog-1.2.1.tgz",
+      "integrity": "sha1-KOe+YZYJtT960d0wChDWTXFiaLY=",
+      "dev": true,
+      "requires": {
+        "ansi": "~0.3.0",
+        "are-we-there-yet": "~1.0.0",
+        "gauge": "~1.2.0"
+      }
+    },
     "number-is-nan": {
       "version": "1.0.1",
       "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz",
       "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=",
       "dev": true
     },
-    "oauth-sign": {
-      "version": "0.9.0",
-      "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz",
-      "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==",
-      "dev": true
-    },
     "once": {
       "version": "1.4.0",
       "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
@@ -2504,12 +2317,6 @@
       "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=",
       "dev": true
     },
-    "performance-now": {
-      "version": "2.1.0",
-      "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz",
-      "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=",
-      "dev": true
-    },
     "picomatch": {
       "version": "2.2.3",
       "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.2.3.tgz",
@@ -2551,24 +2358,6 @@
         }
       }
     },
-    "psl": {
-      "version": "1.8.0",
-      "resolved": "https://registry.npmjs.org/psl/-/psl-1.8.0.tgz",
-      "integrity": "sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ==",
-      "dev": true
-    },
-    "punycode": {
-      "version": "2.1.1",
-      "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz",
-      "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==",
-      "dev": true
-    },
-    "qs": {
-      "version": "6.5.2",
-      "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz",
-      "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==",
-      "dev": true
-    },
     "randombytes": {
       "version": "2.1.0",
       "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz",
@@ -2591,18 +2380,15 @@
       }
     },
     "readable-stream": {
-      "version": "2.3.7",
-      "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz",
-      "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==",
+      "version": "1.0.34",
+      "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-1.0.34.tgz",
+      "integrity": "sha1-Elgg40vIQtLyqq+v5MKRbuMsFXw=",
       "dev": true,
       "requires": {
         "core-util-is": "~1.0.0",
-        "inherits": "~2.0.3",
-        "isarray": "~1.0.0",
-        "process-nextick-args": "~2.0.0",
-        "safe-buffer": "~5.1.1",
-        "string_decoder": "~1.1.1",
-        "util-deprecate": "~1.0.1"
+        "inherits": "~2.0.1",
+        "isarray": "0.0.1",
+        "string_decoder": "~0.10.x"
       }
     },
     "readdirp": {
@@ -2614,34 +2400,6 @@
         "picomatch": "^2.2.1"
       }
     },
-    "request": {
-      "version": "2.88.2",
-      "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz",
-      "integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==",
-      "dev": true,
-      "requires": {
-        "aws-sign2": "~0.7.0",
-        "aws4": "^1.8.0",
-        "caseless": "~0.12.0",
-        "combined-stream": "~1.0.6",
-        "extend": "~3.0.2",
-        "forever-agent": "~0.6.1",
-        "form-data": "~2.3.2",
-        "har-validator": "~5.1.3",
-        "http-signature": "~1.2.0",
-        "is-typedarray": "~1.0.0",
-        "isstream": "~0.1.2",
-        "json-stringify-safe": "~5.0.1",
-        "mime-types": "~2.1.19",
-        "oauth-sign": "~0.9.0",
-        "performance-now": "^2.1.0",
-        "qs": "~6.5.2",
-        "safe-buffer": "^5.1.2",
-        "tough-cookie": "~2.5.0",
-        "tunnel-agent": "^0.6.0",
-        "uuid": "^3.3.2"
-      }
-    },
     "require-directory": {
       "version": "2.1.1",
       "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz",
@@ -2663,12 +2421,6 @@
       "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==",
       "dev": true
     },
-    "safer-buffer": {
-      "version": "2.1.2",
-      "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz",
-      "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
-      "dev": true
-    },
     "semver": {
       "version": "5.7.1",
       "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz",
@@ -2696,23 +2448,6 @@
       "integrity": "sha1-/p965lc3GzOxDLgNoUPPgknPazs=",
       "dev": true
     },
-    "sshpk": {
-      "version": "1.16.1",
-      "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.16.1.tgz",
-      "integrity": "sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg==",
-      "dev": true,
-      "requires": {
-        "asn1": "~0.2.3",
-        "assert-plus": "^1.0.0",
-        "bcrypt-pbkdf": "^1.0.0",
-        "dashdash": "^1.12.0",
-        "ecc-jsbn": "~0.1.1",
-        "getpass": "^0.1.1",
-        "jsbn": "~0.1.0",
-        "safer-buffer": "^2.0.2",
-        "tweetnacl": "~0.14.0"
-      }
-    },
     "string-width": {
       "version": "1.0.2",
       "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz",
@@ -2725,13 +2460,10 @@
       }
     },
     "string_decoder": {
-      "version": "1.1.1",
-      "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz",
-      "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==",
-      "dev": true,
-      "requires": {
-        "safe-buffer": "~5.1.0"
-      }
+      "version": "0.10.31",
+      "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz",
+      "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=",
+      "dev": true
     },
     "strip-ansi": {
       "version": "3.0.1",
@@ -2795,37 +2527,12 @@
         "is-number": "^7.0.0"
       }
     },
-    "tough-cookie": {
-      "version": "2.5.0",
-      "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz",
-      "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==",
-      "dev": true,
-      "requires": {
-        "psl": "^1.1.28",
-        "punycode": "^2.1.1"
-      }
-    },
     "traverse": {
       "version": "0.3.9",
       "resolved": "https://registry.npmjs.org/traverse/-/traverse-0.3.9.tgz",
       "integrity": "sha1-cXuPIgzAu3tE5AUUwisui7xw2Lk=",
       "dev": true
     },
-    "tunnel-agent": {
-      "version": "0.6.0",
-      "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz",
-      "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=",
-      "dev": true,
-      "requires": {
-        "safe-buffer": "^5.0.1"
-      }
-    },
-    "tweetnacl": {
-      "version": "0.14.5",
-      "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz",
-      "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=",
-      "dev": true
-    },
     "typescript": {
       "version": "4.2.4",
       "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.2.4.tgz",
@@ -2855,6 +2562,12 @@
         "setimmediate": "~1.0.4"
       },
       "dependencies": {
+        "isarray": {
+          "version": "1.0.0",
+          "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz",
+          "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=",
+          "dev": true
+        },
         "process-nextick-args": {
           "version": "1.0.7",
           "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-1.0.7.tgz",
@@ -2875,24 +2588,9 @@
             "string_decoder": "~0.10.x",
             "util-deprecate": "~1.0.1"
           }
-        },
-        "string_decoder": {
-          "version": "0.10.31",
-          "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-0.10.31.tgz",
-          "integrity": "sha1-YuIDvEF2bGwoyfyEMB2rHFMQ+pQ=",
-          "dev": true
         }
       }
     },
-    "uri-js": {
-      "version": "4.4.1",
-      "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz",
-      "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==",
-      "dev": true,
-      "requires": {
-        "punycode": "^2.1.0"
-      }
-    },
     "url-join": {
       "version": "0.0.1",
       "resolved": "https://registry.npmjs.org/url-join/-/url-join-0.0.1.tgz",
@@ -2905,23 +2603,6 @@
       "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=",
       "dev": true
     },
-    "uuid": {
-      "version": "3.4.0",
-      "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz",
-      "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==",
-      "dev": true
-    },
-    "verror": {
-      "version": "1.10.0",
-      "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz",
-      "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=",
-      "dev": true,
-      "requires": {
-        "assert-plus": "^1.0.0",
-        "core-util-is": "1.0.2",
-        "extsprintf": "^1.2.0"
-      }
-    },
     "which": {
       "version": "1.3.1",
       "resolved": "https://registry.npmjs.org/which/-/which-1.3.1.tgz",
diff --git a/js/node/package.json b/js/node/package.json
index 7f8524381584f..e05abd6abebba 100644
--- a/js/node/package.json
+++ b/js/node/package.json
@@ -32,7 +32,7 @@
     "@types/minimist": "1.2.1",
     "@types/mocha": "^8.2.2",
     "@types/node": "^14.14.37",
-    "cmake-js": "^6.1.0",
+    "cmake-js": "^6.2.1",
     "fs-extra": "^9.1.0",
     "jsonc": "^2.0.0",
     "minimist": "^1.2.5",
diff --git a/js/web/lib/onnxjs/attribute.ts b/js/web/lib/onnxjs/attribute.ts
index 5b1b926476e87..2fda9ee3340fe 100644
--- a/js/web/lib/onnxjs/attribute.ts
+++ b/js/web/lib/onnxjs/attribute.ts
@@ -7,7 +7,7 @@ import {onnxruntime} from './ort-schema/ort-generated';
 import ortFbs = onnxruntime.experimental.fbs;
 
 import {Tensor} from './tensor';
-import {LongUtil} from './util';
+import {decodeUtf8String, LongUtil} from './util';
 
 export declare namespace Attribute {
   export interface DataTypeMap {
@@ -171,7 +171,7 @@ export class Attribute {
       // string attributes are returned as string, so no conversion is needed.
       if (attr instanceof onnx.AttributeProto) {
         const utf8String = value as Uint8Array;
-        return Buffer.from(utf8String.buffer, utf8String.byteOffset, utf8String.byteLength).toString();
+        return decodeUtf8String(utf8String);
       }
     }
 
@@ -181,8 +181,7 @@ export class Attribute {
       // format strings attributes are returned as string[], so no conversion is needed.
       if (attr instanceof onnx.AttributeProto) {
         const utf8Strings = value as Uint8Array[];
-        return utf8Strings.map(
-            utf8String => Buffer.from(utf8String.buffer, utf8String.byteOffset, utf8String.byteLength).toString());
+        return utf8Strings.map(decodeUtf8String);
       }
     }
 
diff --git a/js/web/lib/onnxjs/session.ts b/js/web/lib/onnxjs/session.ts
index 2978aaa4e5ef6..3a27a424e7aed 100644
--- a/js/web/lib/onnxjs/session.ts
+++ b/js/web/lib/onnxjs/session.ts
@@ -63,7 +63,7 @@ export class Session {
         if (typeof fetch === 'undefined') {
           // node
           const buf = await promisify(readFile)(arg);
-          this.initialize(Buffer.from(buf), isOrtFormat);
+          this.initialize(buf, isOrtFormat);
         } else {
           // browser
           const response = await fetch(arg);
diff --git a/js/web/lib/onnxjs/tensor.ts b/js/web/lib/onnxjs/tensor.ts
index df19d67caac0f..42757d0ef7189 100644
--- a/js/web/lib/onnxjs/tensor.ts
+++ b/js/web/lib/onnxjs/tensor.ts
@@ -9,7 +9,7 @@ import {onnxruntime} from './ort-schema/ort-generated';
 
 import ortFbs = onnxruntime.experimental.fbs;
 
-import {ProtoUtil, ShapeUtil} from './util';
+import {decodeUtf8String, ProtoUtil, ShapeUtil} from './util';
 
 export declare namespace Tensor {
   export interface DataTypeMap {
@@ -217,8 +217,7 @@ export class Tensor {
       // When it's STRING type, the value should always be stored in field
       // 'stringData'
       tensorProto.stringData!.forEach((str, i) => {
-        const buf = Buffer.from(str.buffer, str.byteOffset, str.byteLength);
-        value.data[i] = buf.toString();
+        value.data[i] = decodeUtf8String(str);
       });
 
     } else if (
diff --git a/js/web/lib/onnxjs/util.ts b/js/web/lib/onnxjs/util.ts
index 73d3ca88fc1c1..2e2d49ae4b45c 100644
--- a/js/web/lib/onnxjs/util.ts
+++ b/js/web/lib/onnxjs/util.ts
@@ -1249,3 +1249,7 @@ export class PoolConvUtil {
 
 export const MIN_CLIP = -3.4028234663852886e+38;
 export const MAX_CLIP = 3.4028234663852886e+38;
+
+export function decodeUtf8String(buffer: Uint8Array): string {
+  return new TextDecoder().decode(buffer);
+}
diff --git a/js/web/package.json b/js/web/package.json
index 210d151e4083b..32f95d118fd9e 100644
--- a/js/web/package.json
+++ b/js/web/package.json
@@ -43,6 +43,7 @@
     "@types/mocha": "^8.2.2",
     "@types/npmlog": "^4.1.2",
     "@types/platform": "^1.3.3",
+    "base64-js": "^1.5.1",
     "chai": "^4.3.4",
     "dir-compare": "^3.3.0",
     "electron": "^12.2.3",
diff --git a/js/web/test/test-runner.ts b/js/web/test/test-runner.ts
index 68571db37a965..fa4f4d0413b5e 100644
--- a/js/web/test/test-runner.ts
+++ b/js/web/test/test-runner.ts
@@ -43,20 +43,20 @@ function fromInternalTensor(tensor: Tensor): ort.Tensor {
   return new ort.Tensor(tensor.type, tensor.data as ort.Tensor.DataType, tensor.dims);
 }
 
-async function loadFile(uri: string): Promise<Uint8Array|ArrayBuffer> {
+async function loadFile(uri: string): Promise<Uint8Array> {
   if (typeof fetch === 'undefined') {
     // node
     return promisify(readFile)(uri);
   } else {
     // browser
     const response = await fetch(uri);
-    return response.arrayBuffer();
+    return new Uint8Array(await response.arrayBuffer());
   }
 }
 
 async function loadTensorProto(uriOrData: string|Uint8Array): Promise<Test.NamedTensor> {
   const buf = (typeof uriOrData === 'string') ? await loadFile(uriOrData) : uriOrData;
-  const tensorProto = onnxProto.TensorProto.decode(Buffer.from(buf));
+  const tensorProto = onnxProto.TensorProto.decode(buf);
   const tensor = Tensor.fromProto(tensorProto);
   // add property 'name' to the tensor object.
   const namedTensor = fromInternalTensor(tensor) as unknown as Test.NamedTensor;
diff --git a/js/web/test/test-shared.ts b/js/web/test/test-shared.ts
index f2ea1892aa7bc..41a485bb3dad9 100644
--- a/js/web/test/test-shared.ts
+++ b/js/web/test/test-shared.ts
@@ -1,6 +1,7 @@
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License.
 
+import * as base64 from 'base64-js';
 import * as fs from 'fs';
 import {promisify} from 'util';
 
@@ -8,11 +9,11 @@ import {Attribute} from '../lib/onnxjs/attribute';
 import {Graph} from '../lib/onnxjs/graph';
 
 export function base64toBuffer(data: string): Uint8Array {
-  return Buffer.from(data, 'base64');
+  return base64.toByteArray(data);
 }
 
 export function bufferToBase64(buffer: Uint8Array): string {
-  return Buffer.from(buffer).toString('base64');
+  return base64.fromByteArray(buffer);
 }
 
 async function readFile(file: string) {
@@ -22,14 +23,13 @@ async function readFile(file: string) {
   } else {
     // browser
     const response = await fetch(file);
-    const buffer = await response.arrayBuffer();
-    return Buffer.from(buffer);
+    return new Uint8Array(await response.arrayBuffer());
   }
 }
 
 export async function readJsonFile(file: string): Promise<any> {
   const content = await readFile(file);
-  return JSON.parse(content.toString());
+  return JSON.parse(new TextDecoder().decode(content));
 }
 
 /**
diff --git a/js/web/webpack.config.js b/js/web/webpack.config.js
index 37bc824efac76..79de304ccd233 100644
--- a/js/web/webpack.config.js
+++ b/js/web/webpack.config.js
@@ -245,7 +245,7 @@ function buildTestRunnerConfig({
       new webpack.DefinePlugin({ BUILD_DEFS: DEFAULT_BUILD_DEFS }),
       new webpack.WatchIgnorePlugin({ paths: [/\.js$/, /\.d\.ts$/] }),
       new NodePolyfillPlugin({
-        excludeAliases: ["console"]
+        excludeAliases: ["console", "Buffer"]
       }),
     ],
     module: {
diff --git a/onnxruntime/contrib_ops/cuda/bert/skip_layer_norm.cc b/onnxruntime/contrib_ops/cuda/bert/skip_layer_norm.cc
index dd975ca90fb49..d6cac35261980 100644
--- a/onnxruntime/contrib_ops/cuda/bert/skip_layer_norm.cc
+++ b/onnxruntime/contrib_ops/cuda/bert/skip_layer_norm.cc
@@ -41,6 +41,14 @@ Status SkipLayerNorm<T>::ComputeInternal(OpKernelContext* ctx) const {
 
   Tensor* output = ctx->Output(0, input->Shape());
 
+  if (input->SizeInBytes() == 0) {
+    return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input 'input' has no data from upstream nodes");
+  }
+
+  if (skip->SizeInBytes() == 0) {
+    return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input 'skip' has no data from upstream nodes");
+  }
+
   const auto& input_dims = input->Shape().GetDims();
   if (input_dims.size() != 3) {
     return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT,
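The guard above (and the matching one in layer_norm.cc below) turns a zero-byte input into a descriptive ORT error instead of a downstream CUDA failure. A standalone sketch of the pattern, using hypothetical types in place of ONNX Runtime's Tensor and ORT_MAKE_STATUS:

```cpp
#include <cstddef>
#include <stdexcept>
#include <string>

// Hypothetical stand-in for the tensor view used in the hunks above.
struct TensorView {
  const void* data = nullptr;
  size_t size_in_bytes = 0;
};

// Fail early with a readable message before any CUDA kernel sees the buffer.
void CheckHasData(const TensorView& t, const std::string& name) {
  if (t.size_in_bytes == 0) {
    throw std::invalid_argument("Input '" + name + "' has no data from upstream nodes");
  }
}
```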
diff --git a/onnxruntime/contrib_ops/cuda/inverse.cc b/onnxruntime/contrib_ops/cuda/inverse.cc
index b0560c874a459..4f4da3228ebe6 100644
--- a/onnxruntime/contrib_ops/cuda/inverse.cc
+++ b/onnxruntime/contrib_ops/cuda/inverse.cc
@@ -150,7 +150,7 @@ Status Inverse::ComputeInternal(OpKernelContext* ctx) const {
   }
 
   IAllocatorUniquePtr<int> info = GetScratchBuffer<int>(num_batches);
-  CUDA_RETURN_IF_ERROR(cudaMemsetAsync(info.get(), 0, num_batches, Stream()));
+  CUDA_RETURN_IF_ERROR(cudaMemsetAsync(info.get(), 0, num_batches * sizeof(int), Stream()));
   IAllocatorUniquePtr<int> pivots = GetScratchBuffer<int>(rows * num_batches);
 
   utils::MLTypeCallDispatcher<float, double, MLFloat16> t_disp(input->GetElementType());
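The one-line fix above corrects a byte-count bug: cudaMemsetAsync takes its count in bytes, not elements, so zeroing num_batches ints must scale by sizeof(int). A minimal illustration of the corrected call:

```cpp
#include <cuda_runtime.h>

// cudaMemsetAsync(ptr, value, count, stream) interprets `count` as a byte
// count; passing the element count alone would zero only the first
// num_elements bytes of the int buffer and leave the rest uninitialized.
void ZeroIntBuffer(int* device_ptr, size_t num_elements, cudaStream_t stream) {
  cudaMemsetAsync(device_ptr, 0, num_elements * sizeof(int), stream);
}
```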
diff --git a/onnxruntime/contrib_ops/cuda/layer_norm.cc b/onnxruntime/contrib_ops/cuda/layer_norm.cc
index 50cb1c735ff8d..3095ebf4376af 100644
--- a/onnxruntime/contrib_ops/cuda/layer_norm.cc
+++ b/onnxruntime/contrib_ops/cuda/layer_norm.cc
@@ -61,6 +61,12 @@ Status LayerNorm<T, U, simplified>::ComputeInternal(OpKernelContext* ctx) const
   auto bias_data = (simplified || (nullptr == bias)) ? nullptr : reinterpret_cast<const CudaT*>(bias->template Data<T>());
 
   const TensorShape& x_shape = X->Shape();
+  // Sometimes, due to a conversion issue, the input 'X' has no data, which is a case the CUDA kernel cannot handle.
+  // Provide a more informative error message here instead of a CUDA error.
+  if (X->SizeInBytes() == 0) {
+    return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, "Input 'X' has no data from upstream nodes");
+  }
+
   const int64_t axis = HandleNegativeAxis(axis_, x_shape.NumDimensions());
 
   int n1 = gsl::narrow<int>(x_shape.SizeToDimension(axis));
diff --git a/onnxruntime/core/optimizer/transpose_optimizer/transpose_optimizer.cc b/onnxruntime/core/optimizer/transpose_optimizer/transpose_optimizer.cc
index 419394e9cf937..eacc0e3fd3f1e 100644
--- a/onnxruntime/core/optimizer/transpose_optimizer/transpose_optimizer.cc
+++ b/onnxruntime/core/optimizer/transpose_optimizer/transpose_optimizer.cc
@@ -942,35 +942,35 @@ void PermuteInput(api::GraphRef& graph, api::NodeRef& node, size_t i, const std:
   node.SetInput(i, gather_output);
 }
 
-static bool HandleResize(HandlerArgs& args) {
-  auto inputs = args.node.Inputs();
-  int64_t rank_int = gsl::narrow_cast<int64_t>(args.perm.size());
-
-  if (args.ctx.opset < 11) {
-    PermuteInput(args.ctx.graph, args.node, 1, args.perm_inv);
-  } else {
-    if (inputs[1] != "") {
-      std::vector<int64_t> double_perm_inv = args.perm_inv;
-      double_perm_inv.reserve(2 * args.perm_inv.size());
-      for (int64_t p : args.perm_inv) {
-        double_perm_inv.push_back(p + rank_int);
-      }
-      PermuteInput(args.ctx.graph, args.node, 1, double_perm_inv);
-    }
-    for (size_t i = 2; i < inputs.size(); ++i) {
-      if (inputs[i] != "") {
-        PermuteInput(args.ctx.graph, args.node, i, args.perm_inv);
-      }
-    }
-  }
-
-  TransposeFirstInput(args.ctx, args.node, args.perm_inv);
-  TransposeOutputs(args.ctx, args.node, args.perm);
-
-  return true;
-}
+//static bool HandleResize(HandlerArgs& args) {
+//  auto inputs = args.node.Inputs();
+//  int64_t rank_int = gsl::narrow_cast<int64_t>(args.perm.size());
+//
+//  if (args.ctx.opset < 11) {
+//    PermuteInput(args.ctx.graph, args.node, 1, args.perm_inv);
+//  } else {
+//    if (inputs[1] != "") {
+//      std::vector<int64_t> double_perm_inv = args.perm_inv;
+//      double_perm_inv.reserve(2 * args.perm_inv.size());
+//      for (int64_t p : args.perm_inv) {
+//        double_perm_inv.push_back(p + rank_int);
+//      }
+//      PermuteInput(args.ctx.graph, args.node, 1, double_perm_inv);
+//    }
+//    for (size_t i = 2; i < inputs.size(); ++i) {
+//      if (inputs[i] != "") {
+//        PermuteInput(args.ctx.graph, args.node, i, args.perm_inv);
+//      }
+//    }
+//  }
+//
+//  TransposeFirstInput(args.ctx, args.node, args.perm_inv);
+//  TransposeOutputs(args.ctx, args.node, args.perm);
+//
+//  return true;
+//}
 
-constexpr HandlerInfo resize_handler = {&FirstInput, &HandleResize};
+// constexpr HandlerInfo resize_handler = {&FirstInput, &HandleResize};
 
 static bool HandlePad(HandlerArgs& args) {
   size_t rank = args.perm.size();
@@ -1563,7 +1563,9 @@ static const std::unordered_map<std::string_view, const HandlerInfo&> handler_ma
   {"Split", split_handler},
   {"Shape", shape_handler},
   {"Pad", pad_handler},
-  {"Resize", resize_handler},
+  // TODO: re-enable the Resize handler after adding NHWC support to the Upsample op on CPU
+  // https://github.com/microsoft/onnxruntime/issues/9857
+  //{"Resize", resize_handler},
   {"ReduceSum", reduce_sum_handler},
 
   {"ReduceLogSum", reduce_op_handler}, {"ReduceLogSumExp", reduce_op_handler}, {"ReduceMax", reduce_op_handler},
diff --git a/onnxruntime/core/providers/cuda/cuda_profiler.cc b/onnxruntime/core/providers/cuda/cuda_profiler.cc
index de9cbcc09f049..adf771fb23caf 100644
--- a/onnxruntime/core/providers/cuda/cuda_profiler.cc
+++ b/onnxruntime/core/providers/cuda/cuda_profiler.cc
@@ -1,6 +1,6 @@
 // Copyright (c) Microsoft Corporation. All rights reserved.
 // Licensed under the MIT License.
-#if !(defined(USE_ROCM) || defined(ENABLE_TRAINING))
+#if defined(USE_CUDA) && defined(ENABLE_CUDA_PROFILING)
 
 #include "cuda_profiler.h"
 #include <map>
diff --git a/onnxruntime/core/providers/cuda/cuda_profiler.h b/onnxruntime/core/providers/cuda/cuda_profiler.h
index 2ae6715009a9f..bd625a7c6ac3e 100644
--- a/onnxruntime/core/providers/cuda/cuda_profiler.h
+++ b/onnxruntime/core/providers/cuda/cuda_profiler.h
@@ -2,7 +2,7 @@
 // Licensed under the MIT License.
 #include "core/common/profiler_common.h"
 
-#if !(defined(USE_ROCM) || defined(ENABLE_TRAINING))
+#if defined(USE_CUDA) && defined(ENABLE_CUDA_PROFILING)
 
 #include "core/platform/ort_mutex.h"
 #include <cupti.h>
diff --git a/onnxruntime/core/providers/cuda/cuda_provider_factory.cc b/onnxruntime/core/providers/cuda/cuda_provider_factory.cc
index c390bc89543d2..45e10fa14c11a 100644
--- a/onnxruntime/core/providers/cuda/cuda_provider_factory.cc
+++ b/onnxruntime/core/providers/cuda/cuda_provider_factory.cc
@@ -130,9 +130,26 @@ struct ProviderInfo_CUDA_Impl : ProviderInfo_CUDA {
   }
 
   // Used by slice_concatenate_test.cc and onnxruntime_pybind_state.cc
-  void cudaMemcpy_HostToDevice(void* dst, const void* src, size_t count) override { CUDA_CALL_THROW(cudaMemcpy(dst, src, count, cudaMemcpyHostToDevice)); }
+
+  void cudaMemcpy_HostToDevice(void* dst, const void* src, size_t count) override {
+    // cudaMemcpy() operates on the default stream
+    CUDA_CALL_THROW(cudaMemcpy(dst, src, count, cudaMemcpyHostToDevice));
+
+    // To ensure that the copy has completed, invoke a stream sync for the default stream.
+    // https://docs.nvidia.com/cuda/cuda-runtime-api/api-sync-behavior.html#api-sync-behavior__memcpy-sync
+    // For transfers from pageable host memory to device memory, a stream sync is performed before the copy is initiated.
+    // The function will return once the pageable buffer has been copied to the staging memory for DMA transfer
+    // to device memory, but the DMA to the final destination may not have completed.
+
+    CUDA_CALL_THROW(cudaStreamSynchronize(0));
+  }
+
   // Used by onnxruntime_pybind_state.cc
-  void cudaMemcpy_DeviceToHost(void* dst, const void* src, size_t count) override { CUDA_CALL_THROW(cudaMemcpy(dst, src, count, cudaMemcpyDeviceToHost)); }
+  void cudaMemcpy_DeviceToHost(void* dst, const void* src, size_t count) override {
+    // https://docs.nvidia.com/cuda/cuda-runtime-api/api-sync-behavior.html#api-sync-behavior__memcpy-sync
+    // For transfers from device to either pageable or pinned host memory, the function returns only once the copy has completed.
+    CUDA_CALL_THROW(cudaMemcpy(dst, src, count, cudaMemcpyDeviceToHost));
+  }
 
   int cudaGetDeviceCount() override {
     int num_devices = 0;
diff --git a/onnxruntime/core/providers/nuphar/scripts/rnn_benchmark.py b/onnxruntime/core/providers/nuphar/scripts/rnn_benchmark.py
index baa16888c6626..821a02cbb3dc2 100644
--- a/onnxruntime/core/providers/nuphar/scripts/rnn_benchmark.py
+++ b/onnxruntime/core/providers/nuphar/scripts/rnn_benchmark.py
@@ -121,7 +121,7 @@ def perf_test(rnn_type, num_threads, input_dim, hidden_dim, bidirectional, layer
         convert_to_scan_model(model_name, scan_model_name)
         # note that symbolic shape inference is needed because model has symbolic batch dim, thus init_state is ConstantOfShape
         onnx.save(SymbolicShapeInference.infer_shapes(onnx.load(scan_model_name)), scan_model_name)
-        sess = onnxruntime.InferenceSession(scan_model_name)
+        sess = onnxruntime.InferenceSession(scan_model_name, providers=onnxruntime.get_available_providers())
         count, duration, per_iter_cost = perf_run(sess, feeds, min_counts=top_n, min_duration_seconds=min_duration_seconds)
         avg_scan = top_n_avg(per_iter_cost, top_n)
         print('perf_scan (with {} threads) {}: run for {} iterations, top {} avg {:.3f} ms'.format(num_threads, scan_model_name, count, top_n, avg_scan))
@@ -131,7 +131,7 @@ def perf_test(rnn_type, num_threads, input_dim, hidden_dim, bidirectional, layer
         int8_model_name = os.path.splitext(model_name)[0] + '_int8.onnx'
         convert_matmul_model(scan_model_name, int8_model_name)
         onnx.save(SymbolicShapeInference.infer_shapes(onnx.load(int8_model_name)), int8_model_name)
-        sess = onnxruntime.InferenceSession(int8_model_name)
+        sess = onnxruntime.InferenceSession(int8_model_name, providers=onnxruntime.get_available_providers())
         count, duration, per_iter_cost = perf_run(sess, feeds, min_counts=top_n, min_duration_seconds=min_duration_seconds)
         avg_int8 = top_n_avg(per_iter_cost, top_n)
         print('perf_int8 (with {} threads) {}: run for {} iterations, top {} avg {:.3f} ms'.format(num_threads, int8_model_name, count, top_n, avg_int8))
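
The InferenceSession changes above follow the explicit provider selection required since ORT 1.9. A minimal sketch of the pattern, with a hypothetical model path:

```python
import onnxruntime

# Since ORT 1.9, a build that exposes more than one execution provider requires
# the providers argument; passing the full available list preserves the old
# default behavior of trying every registered provider in order.
sess = onnxruntime.InferenceSession(
    "model.onnx",  # hypothetical path
    providers=onnxruntime.get_available_providers())
```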
diff --git a/onnxruntime/core/providers/openvino/ov_versions/data_ops.cc b/onnxruntime/core/providers/openvino/ov_versions/data_ops.cc
index d8669b55820eb..0549f7123967b 100644
--- a/onnxruntime/core/providers/openvino/ov_versions/data_ops.cc
+++ b/onnxruntime/core/providers/openvino/ov_versions/data_ops.cc
@@ -1022,8 +1022,16 @@ bool DataOps::op_is_supported(std::string name, std::vector<SupportedOp>& op_lis
             return true;
           }
 
-         //The operator to be marked true, it should be supported by all the devices specified with HETERO/MULTI/AUTO
-          if (device_id_.find("HETERO") == 0 || device_id_.find("MULTI") == 0 || device_id_.find("AUTO") == 0) {
+          // For the operator to be marked as supported, at least one of the devices specified with HETERO must support it
+          if (device_id_.find("HETERO") == 0) {
+              status = true;
+              if (device_id_.find(*it) != std::string::npos) {
+                return true;
+              }
+          }
+
+          // For the operator to be marked as supported, all of the devices specified with MULTI/AUTO must support it
+          if (device_id_.find("MULTI") == 0 || device_id_.find("AUTO") == 0) {
               status = true;
               if (device_id_.find(*it) == std::string::npos) {
                 return false;
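
For clarity, a rough Python sketch of the intended device-string semantics above (an illustration only, not the actual C++ implementation):

```python
# "HETERO:GPU,CPU" -> op is supported if ANY listed device supports it.
# "MULTI:GPU,CPU" / "AUTO:GPU,CPU" -> op is supported only if ALL listed devices support it.
def op_supported(device_id: str, devices_supporting_op: set) -> bool:
    prefix, _, rest = device_id.partition(":")
    listed = rest.split(",") if rest else []
    if prefix == "HETERO":
        return any(d in devices_supporting_op for d in listed)
    if prefix in ("MULTI", "AUTO"):
        return all(d in devices_supporting_op for d in listed)
    return device_id in devices_supporting_op
```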
diff --git a/onnxruntime/python/onnxruntime_inference_collection.py b/onnxruntime/python/onnxruntime_inference_collection.py
index b0a86bce64f92..f753ccff31e6c 100644
--- a/onnxruntime/python/onnxruntime_inference_collection.py
+++ b/onnxruntime/python/onnxruntime_inference_collection.py
@@ -357,6 +357,7 @@ def _create_inference_session(self, providers, provider_options, disabled_optimi
                                                                         provider_options,
                                                                         available_providers)
         if providers == [] and len(available_providers) > 1:
+            self.disable_fallback()
             raise ValueError("This ORT build has {} enabled. ".format(available_providers) +
                              "Since ORT 1.9, you are required to explicitly set " +
                              "the providers parameter when instantiating InferenceSession. For example, "
diff --git a/onnxruntime/python/onnxruntime_pybind_state.cc b/onnxruntime/python/onnxruntime_pybind_state.cc
index 090440459c9eb..f7cc9d37c0462 100644
--- a/onnxruntime/python/onnxruntime_pybind_state.cc
+++ b/onnxruntime/python/onnxruntime_pybind_state.cc
@@ -505,8 +505,8 @@ std::unique_ptr<IExecutionProvider> CreateExecutionProviderInstance(
             return tensorrt_provider_factory->CreateProvider();
           }
         }
-        LOGS_DEFAULT(WARNING) << "Failed to register " << type << ". Please reference https://onnxruntime.ai/docs/execution-providers/TensorRT-ExecutionProvider.html#requirements to ensure all dependencies are met.";
     }
+    LOGS_DEFAULT(WARNING) << "Failed to create " << type << ". Please reference https://onnxruntime.ai/docs/execution-providers/TensorRT-ExecutionProvider.html#requirements to ensure all dependencies are met.";
 #endif
   } else if (type == kMIGraphXExecutionProvider) {
 #ifdef USE_MIGRAPHX
@@ -529,11 +529,10 @@ std::unique_ptr<IExecutionProvider> CreateExecutionProviderInstance(
       } else {
         if (!Env::Default().GetEnvironmentVar("CUDA_PATH").empty()) {
           ORT_THROW("CUDA_PATH is set but CUDA wasn't able to be loaded. Please install the correct version of CUDA and cuDNN as mentioned in the GPU requirements page (https://onnxruntime.ai/docs/reference/execution-providers/CUDA-ExecutionProvider.html#requirements), make sure they're in the PATH, and that your GPU is supported.");
-        } else {
-          LOGS_DEFAULT(WARNING) << "Failed to register " << type << ". Please reference https://onnxruntime.ai/docs/reference/execution-providers/CUDA-ExecutionProvider.html#requirements to ensure all dependencies are met.";
         }
       }
     }
+    LOGS_DEFAULT(WARNING) << "Failed to create " << type << ". Please reference https://onnxruntime.ai/docs/reference/execution-providers/CUDA-ExecutionProvider.html#requirements to ensure all dependencies are met.";
 #endif
   } else if (type == kRocmExecutionProvider) {
 #ifdef USE_ROCM
@@ -616,7 +615,7 @@ std::unique_ptr<IExecutionProvider> CreateExecutionProviderInstance(
       if (!Env::Default().GetEnvironmentVar("INTEL_OPENVINO_DIR").empty()) {
         ORT_THROW("INTEL_OPENVINO_DIR is set but OpenVINO library wasn't able to be loaded. Please install a supported version of OpenVINO as mentioned in the requirements page (https://onnxruntime.ai/docs/execution-providers/OpenVINO-ExecutionProvider.html#requirements), ensure dependency libraries are in the PATH and your hardware is supported.");
       } else {
-        LOGS_DEFAULT(WARNING) << "Failed to register " << type << ". Please reference https://onnxruntime.ai/docs/execution-providers/OpenVINO-ExecutionProvider.html#requirements to ensure all dependencies are met.";
+        LOGS_DEFAULT(WARNING) << "Failed to create " << type << ". Please reference https://onnxruntime.ai/docs/execution-providers/OpenVINO-ExecutionProvider.html#requirements to ensure all dependencies are met.";
       }
     }
 #endif
diff --git a/onnxruntime/python/tools/onnxruntime_test.py b/onnxruntime/python/tools/onnxruntime_test.py
index c1f809074c6ed..0d4cc22be3d96 100644
--- a/onnxruntime/python/tools/onnxruntime_test.py
+++ b/onnxruntime/python/tools/onnxruntime_test.py
@@ -71,7 +71,7 @@ def run_model(model_path,
         sess_options.enable_profiling = True
         sess_options.profile_file_prefix = os.path.basename(model_path)
 
-    sess = onnxrt.InferenceSession(model_path, sess_options)
+    sess = onnxrt.InferenceSession(model_path, sess_options=sess_options, providers=onnxrt.get_available_providers())
     meta = sess.get_modelmeta()
 
     if not feeds:
diff --git a/onnxruntime/python/tools/quantization/onnx_quantizer.py b/onnxruntime/python/tools/quantization/onnx_quantizer.py
index 38e3c5e30e4dd..aa0ee6156fa65 100644
--- a/onnxruntime/python/tools/quantization/onnx_quantizer.py
+++ b/onnxruntime/python/tools/quantization/onnx_quantizer.py
@@ -47,6 +47,8 @@ def __init__(self, model, per_channel, reduce_range, mode, static, weight_qType,
         is_weight_int8 = weight_qType == QuantType.QInt8
         self.is_weight_symmetric = is_weight_int8 if 'WeightSymmetric' not in self.extra_options else self.extra_options['WeightSymmetric']
         self.is_activation_symmetric = False if 'ActivationSymmetric' not in self.extra_options else self.extra_options['ActivationSymmetric']
+        self.op_types_support_per_channel_quantization = [] if 'OpTypesSupportPerChannelQuantization' not in self.extra_options \
+                                                        else self.extra_options['OpTypesSupportPerChannelQuantization']
 
         self.input_qType = onnx_proto.TensorProto.INT8 if input_qType == QuantType.QInt8 else onnx_proto.TensorProto.UINT8
         self.weight_qType = onnx_proto.TensorProto.INT8 if weight_qType == QuantType.QInt8 else onnx_proto.TensorProto.UINT8
diff --git a/onnxruntime/python/tools/quantization/operators/qdq_base_operator.py b/onnxruntime/python/tools/quantization/operators/qdq_base_operator.py
index f8f5546b1512b..ebe3b7c71a789 100644
--- a/onnxruntime/python/tools/quantization/operators/qdq_base_operator.py
+++ b/onnxruntime/python/tools/quantization/operators/qdq_base_operator.py
@@ -19,4 +19,10 @@ def quantize(self):
             nodes_to_iterate = itertools.chain(node.input, node.output)
 
         for tensor_name in nodes_to_iterate:
-            self.quantizer.quantize_tensor(tensor_name)
+            if self.quantizer.is_per_channel() and node.op_type in self.quantizer.op_types_support_per_channel_quantization:
+                self.quantizer.quantize_tensor_per_channel(tensor_name, self.quantizer.qdq_channel_axis)
+            else:
+                self.quantizer.quantize_tensor(tensor_name)
diff --git a/onnxruntime/python/tools/quantization/qdq_quantizer.py b/onnxruntime/python/tools/quantization/qdq_quantizer.py
index a14e220d3c33f..423e8d5c8d938 100644
--- a/onnxruntime/python/tools/quantization/qdq_quantizer.py
+++ b/onnxruntime/python/tools/quantization/qdq_quantizer.py
@@ -51,6 +51,15 @@ def __init__(self, model, per_channel, reduce_range, mode, static, weight_qType,
         self.add_qdq_pair_to_weight = False if 'AddQDQPairToWeight' not in extra_options \
                                         else extra_options['AddQDQPairToWeight'] 
 
+        # By default, multiple nodes may share a single QDQ pair on a common input tensor.
+        # TensorRT can't share a QDQ pair between nodes, so when this option is enabled
+        # a dedicated QDQ pair is created for each receiving node.
+        self.dedicated_qdq_pair = False if 'DedicatedQDQPair' not in extra_options else extra_options['DedicatedQDQPair']
+        if self.dedicated_qdq_pair:
+            self.tensor_to_its_receiving_nodes = {}
+
+        # Channel axis when per_channel is True
+        self.qdq_channel_axis = 0 if 'QDQChannelAxis' not in extra_options else extra_options['QDQChannelAxis']
+
     def quantize_tensor(self, tensor_name):
         weight = find_by_name(tensor_name, self.model.initializer())
         if weight is not None:
@@ -91,6 +100,14 @@ def remove_nodes(self):
         self.model.remove_nodes(self.nodes_to_remove)
 
     def quantize_model(self):
+        if self.dedicated_qdq_pair:
+            for node in self.model.nodes():
+                if self.should_quantize(node):
+                    for tensor_name in node.input:
+                        self.tensor_to_its_receiving_nodes.setdefault(tensor_name, []).append(node)
+
         for node in self.model.nodes():
             if self.should_quantize(node):
                 op_quantizer = CreateQDQQuantizer(self, node)
@@ -156,30 +173,55 @@ def quantize_tensors(self):
                         "In static mode quantization params for inputs and outputs of nodes to be quantized are required."
                         .format(tensor_name))
 
-                q_input = tensor_name
-                q_output = tensor_name + "_QuantizeLinear"
-                dq_input = q_output
-                dq_output = tensor_name + "_DequantizeLinear"
-                if self.model.is_graph_output(tensor_name):
-                    q_input = tensor_name + "_QuantizeLinearInput"
-                    dq_output = tensor_name
-                    self.model.replace_output_of_all_nodes(tensor_name, q_input)
+                if self.dedicated_qdq_pair and tensor_name in self.tensor_to_its_receiving_nodes and len(self.tensor_to_its_receiving_nodes[tensor_name]) > 1:
+                    num_dedicated_qdq_pair = len(self.tensor_to_its_receiving_nodes[tensor_name])
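+                    # e.g. a tensor "T" consumed by three nodes gets T_QuantizeLinear_1..3
+                    # and T_DequantizeLinear_1..3, and each consumer is rewired to read
+                    # from its own dedicated DequantizeLinear output.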
+                    for i in range(num_dedicated_qdq_pair):
+                        postfix = str(i + 1)
+                        q_input = tensor_name
+                        q_output = tensor_name + "_QuantizeLinear_" + postfix
+                        dq_input = q_output
+                        dq_output = tensor_name + "_DequantizeLinear_" + postfix
+                        quant_node_name = q_output
+                        dequant_node_name = dq_output
+                        qlinear_node = onnx.helper.make_node("QuantizeLinear", [q_input, scale_name, zp_name],
+                                                             [q_output], quant_node_name)
+                        dequant_node = onnx.helper.make_node("DequantizeLinear",
+                                                             [dq_input, scale_name, zp_name],
+                                                             [dq_output],
+                                                             dequant_node_name)
+                        self.model.add_nodes([qlinear_node, dequant_node])
+
+                        node = self.tensor_to_its_receiving_nodes[tensor_name][i]
+                        self.model.replace_node_input(node, tensor_name, dq_output)
+
+                    quantized_value = QuantizedValue(tensor_name, dq_output, scale_name, zp_name,
+                                                     QuantizedValueType.Input)
+                    self.quantized_value_map[tensor_name] = quantized_value
                 else:
-                    self.model.replace_input_of_all_nodes(tensor_name, dq_output)
+                    q_input = tensor_name
+                    q_output = tensor_name + "_QuantizeLinear"
+                    dq_input = q_output
+                    dq_output = tensor_name + "_DequantizeLinear"
+                    if self.model.is_graph_output(tensor_name):
+                        q_input = tensor_name + "_QuantizeLinearInput"
+                        dq_output = tensor_name
+                        self.model.replace_output_of_all_nodes(tensor_name, q_input)
+                    else:
+                        self.model.replace_input_of_all_nodes(tensor_name, dq_output)
 
-                quant_node_name = tensor_name + "_QuantizeLinear"
-                dequant_node_name = tensor_name + "_DequantizeLinear"
-                qlinear_node = onnx.helper.make_node("QuantizeLinear", [q_input, scale_name, zp_name],
-                                                     [q_output], quant_node_name)
-                dequant_node = onnx.helper.make_node("DequantizeLinear",
-                                                     [dq_input, scale_name, zp_name],
-                                                     [dq_output],
-                                                     dequant_node_name)
-                self.model.add_nodes([qlinear_node, dequant_node])
+                    quant_node_name = tensor_name + "_QuantizeLinear"
+                    dequant_node_name = tensor_name + "_DequantizeLinear"
+                    qlinear_node = onnx.helper.make_node("QuantizeLinear", [q_input, scale_name, zp_name],
+                                                         [q_output], quant_node_name)
+                    dequant_node = onnx.helper.make_node("DequantizeLinear",
+                                                         [dq_input, scale_name, zp_name],
+                                                         [dq_output],
+                                                         dequant_node_name)
+                    self.model.add_nodes([qlinear_node, dequant_node])
 
-                quantized_value = QuantizedValue(tensor_name, dq_output, scale_name, zp_name,
-                                                 QuantizedValueType.Input)
-                self.quantized_value_map[tensor_name] = quantized_value
+                    quantized_value = QuantizedValue(tensor_name, dq_output, scale_name, zp_name,
+                                                     QuantizedValueType.Input)
+                    self.quantized_value_map[tensor_name] = quantized_value
 
     def quantize_bias_tensors(self):
         for bias_name, input_name, weight_name in self.bias_to_quantize:
diff --git a/onnxruntime/python/tools/quantization/quantize.py b/onnxruntime/python/tools/quantization/quantize.py
index d72e372bbf2af..bc0a57a425507 100644
--- a/onnxruntime/python/tools/quantization/quantize.py
+++ b/onnxruntime/python/tools/quantization/quantize.py
@@ -198,6 +198,8 @@ def quantize_static(model_input,
                                               inserts both QuantizeLinear/DeQuantizeLinear nodes to weight.
             OpTypesToExcludeOutputQuantizatioin = list of op type : Default is []. If any op type is specified, it won't quantize  
                                                                     the output of ops with this specific op types.
+            DedicatedQDQPair = True/False : Default is False. By default, multiple nodes that consume the same tensor
+                                            share a single QDQ pair on that input. If True, an identical but dedicated
+                                            QDQ pair is created for each node.
     '''
 
     mode = QuantizationMode.QLinearOps
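
A hedged usage sketch of the new extra_options (the file paths and calibration data reader are hypothetical; OpTypesSupportPerChannelQuantization is read by the quantizer as well):

```python
from onnxruntime.quantization import QuantFormat, QuantType, quantize_static

# dr is assumed to be a CalibrationDataReader prepared elsewhere.
quantize_static(
    "model.onnx", "model_qdq.onnx", dr,
    quant_format=QuantFormat.QDQ,
    per_channel=True,
    activation_type=QuantType.QInt8,
    weight_type=QuantType.QInt8,
    extra_options={
        "DedicatedQDQPair": True,                           # one QDQ pair per consuming node (TRT-friendly)
        "QDQChannelAxis": 0,                                # channel axis used when per_channel is True
        "OpTypesSupportPerChannelQuantization": ["MatMul"]  # op types quantized per channel in QDQ mode
    })
```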
diff --git a/onnxruntime/test/framework/inference_session_test.cc b/onnxruntime/test/framework/inference_session_test.cc
index 9d5fa3ec74d16..23e240a1342b2 100644
--- a/onnxruntime/test/framework/inference_session_test.cc
+++ b/onnxruntime/test/framework/inference_session_test.cc
@@ -661,7 +661,7 @@ TEST(InferenceSessionTests, CheckRunProfilerWithSessionOptions) {
     }
   }
 
-#if defined(USE_CUDA) && !defined(ENABLE_TRAINING) && defined(CUDA_VERSION) && CUDA_VERSION >= 11000
+#if defined(USE_CUDA) && defined(ENABLE_CUDA_PROFILING)
   ASSERT_TRUE(has_kernel_info);
 #endif
 }
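
With a build configured using --enable_cuda_profiling, GPU kernel events should appear in the trace produced by session profiling. A minimal sketch from Python (model path and input name are hypothetical):

```python
import numpy as np
import onnxruntime as onnxrt

so = onnxrt.SessionOptions()
so.enable_profiling = True
sess = onnxrt.InferenceSession("model.onnx", sess_options=so,
                               providers=["CUDAExecutionProvider", "CPUExecutionProvider"])
sess.run(None, {"input": np.zeros((1, 3, 224, 224), dtype=np.float32)})
print(sess.end_profiling())  # path of the JSON trace, now including kernel entries
```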
diff --git a/onnxruntime/test/optimizer/transpose_optimizer_test.cc b/onnxruntime/test/optimizer/transpose_optimizer_test.cc
index c4afac1b02e17..a76843413e525 100644
--- a/onnxruntime/test/optimizer/transpose_optimizer_test.cc
+++ b/onnxruntime/test/optimizer/transpose_optimizer_test.cc
@@ -291,209 +291,212 @@ TEST(TransposeOptimizerTests, TestPadNonconst) {
                     /*opset_version*/ 11);
 }
 
-TEST(TransposeOptimizerTests, TestResize) {
-  auto build_test_case_1 = [&](ModelTestBuilder& builder) {
-    auto* input0_arg = MakeInput<float>(builder, {{4, -1, 2, -1}}, {4, 6, 2, 10}, 0.0, 1.0);
-    auto* const_1 = builder.MakeInitializer<float>({4}, {0.3f, 2.5f, 1.0f, 0.7f});
-    auto* transpose_1_out_0 = builder.MakeIntermediate();
-    auto* resize_1_out_0 = builder.MakeIntermediate();
-    auto* transpose_2_out_0 = builder.MakeOutput();
-
-    auto& transpose_1 = builder.AddNode("Transpose", {input0_arg}, {transpose_1_out_0});
-    transpose_1.AddAttribute("perm", std::vector<int64_t>{0, 3, 1, 2});
-    builder.AddNode("Resize", {transpose_1_out_0, const_1}, {resize_1_out_0});
-    auto& transpose_2 = builder.AddNode("Transpose", {resize_1_out_0}, {transpose_2_out_0});
-    transpose_2.AddAttribute("perm", std::vector<int64_t>{0, 2, 3, 1});
-  };
-
-  auto check_optimized_graph_1 = [&](InferenceSessionWrapper& session) {
-    int transpose_cost = EstimateTransposeCost(session.GetGraph());
-    EXPECT_EQ(transpose_cost, 0);
-  };
-
-  TransformerTester(build_test_case_1,
-                    check_optimized_graph_1,
-                    TransformerLevel::Default,
-                    TransformerLevel::Level1,
-                    /*opset_version*/ 10);
-}
-
-TEST(TransposeOptimizerTests, TestResizeOpset11) {
-  auto build_test_case_1 = [&](ModelTestBuilder& builder) {
-    auto* input0_arg = MakeInput<float>(builder, {{4, -1, 2, -1}}, {4, 6, 2, 10}, 0.0, 1.0);
-    auto* const_1 = builder.MakeInitializer<float>({8}, {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f});
-    auto* const_2 = builder.MakeInitializer<float>({4}, {0.3f, 2.5f, 1.0f, 0.7f});
-    auto* transpose_1_out_0 = builder.MakeIntermediate();
-    auto* resize_1_out_0 = builder.MakeIntermediate();
-    auto* transpose_2_out_0 = builder.MakeOutput();
-
-    auto& transpose_1 = builder.AddNode("Transpose", {input0_arg}, {transpose_1_out_0});
-    transpose_1.AddAttribute("perm", std::vector<int64_t>{0, 3, 1, 2});
-    builder.AddNode("Resize", {transpose_1_out_0, const_1, const_2}, {resize_1_out_0});
-    auto& transpose_2 = builder.AddNode("Transpose", {resize_1_out_0}, {transpose_2_out_0});
-    transpose_2.AddAttribute("perm", std::vector<int64_t>{0, 2, 3, 1});
-  };
-
-  auto check_optimized_graph_1 = [&](InferenceSessionWrapper& session) {
-    int transpose_cost = EstimateTransposeCost(session.GetGraph());
-    EXPECT_EQ(transpose_cost, 0);
-  };
-
-  TransformerTester(build_test_case_1,
-                    check_optimized_graph_1,
-                    TransformerLevel::Default,
-                    TransformerLevel::Level1,
-                    /*opset_version*/ 11);
-}
-
-TEST(TransposeOptimizerTests, TestResizeOpset15) {
-  auto build_test_case_1 = [&](ModelTestBuilder& builder) {
-    auto* input0_arg = MakeInput<float>(builder, {{4, -1, 2, -1}}, {4, 6, 2, 10}, 0.0, 1.0);
-    auto* const_1 = builder.MakeInitializer<float>({4}, {0.3f, 2.5f, 1.0f, 0.7f});
-    auto* transpose_1_out_0 = builder.MakeIntermediate();
-    auto* resize_1_out_0 = builder.MakeIntermediate();
-    auto* transpose_2_out_0 = builder.MakeOutput();
-    auto empty_arg = NodeArg("", nullptr);
-
-    auto& transpose_1 = builder.AddNode("Transpose", {input0_arg}, {transpose_1_out_0});
-    transpose_1.AddAttribute("perm", std::vector<int64_t>{0, 3, 1, 2});
-    builder.AddNode("Resize", {transpose_1_out_0, &empty_arg, const_1}, {resize_1_out_0});
-    auto& transpose_2 = builder.AddNode("Transpose", {resize_1_out_0}, {transpose_2_out_0});
-    transpose_2.AddAttribute("perm", std::vector<int64_t>{0, 2, 3, 1});
-  };
-
-  auto check_optimized_graph_1 = [&](InferenceSessionWrapper& session) {
-    int transpose_cost = EstimateTransposeCost(session.GetGraph());
-    EXPECT_EQ(transpose_cost, 0);
-  };
-
-  TransformerTester(build_test_case_1,
-                    check_optimized_graph_1,
-                    TransformerLevel::Default,
-                    TransformerLevel::Level1,
-                    /*opset_version*/ 15);
-}
-
-TEST(TransposeOptimizerTests, TestResizeSizeRoi) {
-  auto build_test_case_1 = [&](ModelTestBuilder& builder) {
-    auto* input0_arg = MakeInput<float>(builder, {{4, -1, 2, -1}}, {4, 6, 2, 10}, 0.0, 1.0);
-    auto* const_1 = builder.MakeInitializer<float>({8}, {0.1f, 0.2f, 0.3f, 0.4f, 0.9f, 0.8f, 0.7f, 0.6f});
-    auto* const_2 = builder.MakeInitializer<int64_t>({4}, {10, 9, 8, 7});
-    auto* transpose_1_out_0 = builder.MakeIntermediate();
-    auto* resize_1_out_0 = builder.MakeIntermediate();
-    auto* transpose_2_out_0 = builder.MakeOutput();
-    auto empty_arg = NodeArg("", nullptr);
-
-    auto& transpose_1 = builder.AddNode("Transpose", {input0_arg}, {transpose_1_out_0});
-    transpose_1.AddAttribute("perm", std::vector<int64_t>{0, 3, 1, 2});
-    auto& resize_1 = builder.AddNode("Resize", {transpose_1_out_0, const_1, &empty_arg, const_2}, {resize_1_out_0});
-    resize_1.AddAttribute("coordinate_transformation_mode", "tf_crop_and_resize");
-    auto& transpose_2 = builder.AddNode("Transpose", {resize_1_out_0}, {transpose_2_out_0});
-    transpose_2.AddAttribute("perm", std::vector<int64_t>{0, 2, 3, 1});
-  };
-
-  auto check_optimized_graph_1 = [&](InferenceSessionWrapper& session) {
-    int transpose_cost = EstimateTransposeCost(session.GetGraph());
-    EXPECT_EQ(transpose_cost, 0);
-  };
-
-  TransformerTester(build_test_case_1,
-                    check_optimized_graph_1,
-                    TransformerLevel::Default,
-                    TransformerLevel::Level1,
-                    /*opset_version*/ 15);
-}
-
-TEST(TransposeOptimizerTests, TestResizeRoiScalesZeroRank0) {
-  auto build_test_case_1 = [&](ModelTestBuilder& builder) {
-    auto* input = builder.MakeInput<uint8_t>({1, 512, 512, 3},
-                                             std::numeric_limits<uint8_t>::min(),
-                                             std::numeric_limits<uint8_t>::max());
-    auto* resize_in_roi = builder.MakeInitializer<float>({0}, {});
-    auto* resize_in_scales = builder.MakeInitializer<float>({0}, {});
-    auto* resize_in_sizes = builder.MakeInitializer<int64_t>({4}, {1, 256, 32, 32});
-
-    auto* transpose1_out_transposed = builder.MakeIntermediate();
-    auto* resize_out_Y = builder.MakeIntermediate();
-    auto* output = builder.MakeOutput();
-
-    auto& transpose_1 = builder.AddNode("Transpose", {input}, {transpose1_out_transposed});
-    transpose_1.AddAttribute("perm", std::vector<int64_t>{0, 3, 1, 2});
-    builder.AddNode("Resize",
-                    {transpose1_out_transposed, resize_in_roi, resize_in_scales, resize_in_sizes},
-                    {resize_out_Y});
-    auto& transpose_2 = builder.AddNode("Transpose", {resize_out_Y}, {output});
-    transpose_2.AddAttribute("perm", std::vector<int64_t>{0, 2, 3, 1});
-  };
-
-  auto check_optimized_graph_1 = [&](InferenceSessionWrapper& session) {
-    int transpose_cost = EstimateTransposeCost(session.GetGraph());
-    EXPECT_EQ(transpose_cost, 0);
-  };
-
-  TransformerTester(build_test_case_1,
-                    check_optimized_graph_1,
-                    TransformerLevel::Default,
-                    TransformerLevel::Level1);
-}
-
-TEST(TransposeOptimizerTests, TestResizeNonconst) {
-  auto build_test_case_1 = [&](ModelTestBuilder& builder) {
-    auto* input0_arg = MakeInput<float>(builder, {{4, -1, 2, -1}}, {4, 6, 2, 10}, 0.0, 1.0);
-    auto* input1_arg = MakeInput<float>(builder, {{8}}, {8}, {0.1f, 0.2f, 0.3f, 0.4f, 0.9f, 0.8f, 0.7f, 0.6f});
-    auto* input2_arg = MakeInput<float>(builder, {{4}}, {4}, {0.3f, 2.5f, 1.0f, 0.7f});
-    auto* transpose_1_out_0 = builder.MakeIntermediate();
-    auto* resize_1_out_0 = builder.MakeIntermediate();
-    auto* transpose_2_out_0 = builder.MakeOutput();
-
-    auto& transpose_1 = builder.AddNode("Transpose", {input0_arg}, {transpose_1_out_0});
-    transpose_1.AddAttribute("perm", std::vector<int64_t>{0, 3, 1, 2});
-    auto& resize_1 = builder.AddNode("Resize", {transpose_1_out_0, input1_arg, input2_arg}, {resize_1_out_0});
-    resize_1.AddAttribute("coordinate_transformation_mode", "tf_crop_and_resize");
-    auto& transpose_2 = builder.AddNode("Transpose", {resize_1_out_0}, {transpose_2_out_0});
-    transpose_2.AddAttribute("perm", std::vector<int64_t>{0, 2, 3, 1});
-  };
-
-  auto check_optimized_graph_1 = [&](InferenceSessionWrapper& session) {
-    int transpose_cost = EstimateTransposeCost(session.GetGraph());
-    EXPECT_EQ(transpose_cost, 0);
-  };
-
-  TransformerTester(build_test_case_1,
-                    check_optimized_graph_1,
-                    TransformerLevel::Default,
-                    TransformerLevel::Level1,
-                    /*opset_version*/ 11);
-}
-
-TEST(TransposeOptimizerTests, TestResizeNonconstOpset13) {
-  auto build_test_case_1 = [&](ModelTestBuilder& builder) {
-    auto* input0_arg = MakeInput<float>(builder, {{4, -1, 2, -1}}, {4, 6, 2, 10}, 0.0, 1.0);
-    auto* input1_arg = MakeInput<float>(builder, {{8}}, {8}, {0.1f, 0.2f, 0.3f, 0.4f, 0.9f, 0.8f, 0.7f, 0.6f});
-    auto* input2_arg = MakeInput<float>(builder, {{4}}, {4}, {0.3f, 2.5f, 1.0f, 0.7f});
-    auto* transpose_1_out_0 = builder.MakeIntermediate();
-    auto* resize_1_out_0 = builder.MakeIntermediate();
-    auto* transpose_2_out_0 = builder.MakeOutput();
-
-    auto& transpose_1 = builder.AddNode("Transpose", {input0_arg}, {transpose_1_out_0});
-    transpose_1.AddAttribute("perm", std::vector<int64_t>{0, 3, 1, 2});
-    auto& resize_1 = builder.AddNode("Resize", {transpose_1_out_0, input1_arg, input2_arg}, {resize_1_out_0});
-    resize_1.AddAttribute("coordinate_transformation_mode", "tf_crop_and_resize");
-    auto& transpose_2 = builder.AddNode("Transpose", {resize_1_out_0}, {transpose_2_out_0});
-    transpose_2.AddAttribute("perm", std::vector<int64_t>{0, 2, 3, 1});
-  };
-
-  auto check_optimized_graph_1 = [&](InferenceSessionWrapper& session) {
-    int transpose_cost = EstimateTransposeCost(session.GetGraph());
-    EXPECT_EQ(transpose_cost, 0);
-  };
-
-  TransformerTester(build_test_case_1,
-                    check_optimized_graph_1,
-                    TransformerLevel::Default,
-                    TransformerLevel::Level1,
-                    /*opset_version*/ 13);
-}
+// TODO: re-enable the resize transformer tests after adding NHWC support to the Upsample op on CPU
+// https://github.com/microsoft/onnxruntime/issues/9857
+
+//TEST(TransposeOptimizerTests, TestResize) {
+//  auto build_test_case_1 = [&](ModelTestBuilder& builder) {
+//    auto* input0_arg = MakeInput<float>(builder, {{4, -1, 2, -1}}, {4, 6, 2, 10}, 0.0, 1.0);
+//    auto* const_1 = builder.MakeInitializer<float>({4}, {0.3f, 2.5f, 1.0f, 0.7f});
+//    auto* transpose_1_out_0 = builder.MakeIntermediate();
+//    auto* resize_1_out_0 = builder.MakeIntermediate();
+//    auto* transpose_2_out_0 = builder.MakeOutput();
+//
+//    auto& transpose_1 = builder.AddNode("Transpose", {input0_arg}, {transpose_1_out_0});
+//    transpose_1.AddAttribute("perm", std::vector<int64_t>{0, 3, 1, 2});
+//    builder.AddNode("Resize", {transpose_1_out_0, const_1}, {resize_1_out_0});
+//    auto& transpose_2 = builder.AddNode("Transpose", {resize_1_out_0}, {transpose_2_out_0});
+//    transpose_2.AddAttribute("perm", std::vector<int64_t>{0, 2, 3, 1});
+//  };
+//
+//  auto check_optimized_graph_1 = [&](InferenceSessionWrapper& session) {
+//    int transpose_cost = EstimateTransposeCost(session.GetGraph());
+//    EXPECT_EQ(transpose_cost, 0);
+//  };
+//
+//  TransformerTester(build_test_case_1,
+//                    check_optimized_graph_1,
+//                    TransformerLevel::Default,
+//                    TransformerLevel::Level1,
+//                    /*opset_version*/ 10);
+//}
+//
+//TEST(TransposeOptimizerTests, TestResizeOpset11) {
+//  auto build_test_case_1 = [&](ModelTestBuilder& builder) {
+//    auto* input0_arg = MakeInput<float>(builder, {{4, -1, 2, -1}}, {4, 6, 2, 10}, 0.0, 1.0);
+//    auto* const_1 = builder.MakeInitializer<float>({8}, {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f});
+//    auto* const_2 = builder.MakeInitializer<float>({4}, {0.3f, 2.5f, 1.0f, 0.7f});
+//    auto* transpose_1_out_0 = builder.MakeIntermediate();
+//    auto* resize_1_out_0 = builder.MakeIntermediate();
+//    auto* transpose_2_out_0 = builder.MakeOutput();
+//
+//    auto& transpose_1 = builder.AddNode("Transpose", {input0_arg}, {transpose_1_out_0});
+//    transpose_1.AddAttribute("perm", std::vector<int64_t>{0, 3, 1, 2});
+//    builder.AddNode("Resize", {transpose_1_out_0, const_1, const_2}, {resize_1_out_0});
+//    auto& transpose_2 = builder.AddNode("Transpose", {resize_1_out_0}, {transpose_2_out_0});
+//    transpose_2.AddAttribute("perm", std::vector<int64_t>{0, 2, 3, 1});
+//  };
+//
+//  auto check_optimized_graph_1 = [&](InferenceSessionWrapper& session) {
+//    int transpose_cost = EstimateTransposeCost(session.GetGraph());
+//    EXPECT_EQ(transpose_cost, 0);
+//  };
+//
+//  TransformerTester(build_test_case_1,
+//                    check_optimized_graph_1,
+//                    TransformerLevel::Default,
+//                    TransformerLevel::Level1,
+//                    /*opset_version*/ 11);
+//}
+//
+//TEST(TransposeOptimizerTests, TestResizeOpset15) {
+//  auto build_test_case_1 = [&](ModelTestBuilder& builder) {
+//    auto* input0_arg = MakeInput<float>(builder, {{4, -1, 2, -1}}, {4, 6, 2, 10}, 0.0, 1.0);
+//    auto* const_1 = builder.MakeInitializer<float>({4}, {0.3f, 2.5f, 1.0f, 0.7f});
+//    auto* transpose_1_out_0 = builder.MakeIntermediate();
+//    auto* resize_1_out_0 = builder.MakeIntermediate();
+//    auto* transpose_2_out_0 = builder.MakeOutput();
+//    auto empty_arg = NodeArg("", nullptr);
+//
+//    auto& transpose_1 = builder.AddNode("Transpose", {input0_arg}, {transpose_1_out_0});
+//    transpose_1.AddAttribute("perm", std::vector<int64_t>{0, 3, 1, 2});
+//    builder.AddNode("Resize", {transpose_1_out_0, &empty_arg, const_1}, {resize_1_out_0});
+//    auto& transpose_2 = builder.AddNode("Transpose", {resize_1_out_0}, {transpose_2_out_0});
+//    transpose_2.AddAttribute("perm", std::vector<int64_t>{0, 2, 3, 1});
+//  };
+//
+//  auto check_optimized_graph_1 = [&](InferenceSessionWrapper& session) {
+//    int transpose_cost = EstimateTransposeCost(session.GetGraph());
+//    EXPECT_EQ(transpose_cost, 0);
+//  };
+//
+//  TransformerTester(build_test_case_1,
+//                    check_optimized_graph_1,
+//                    TransformerLevel::Default,
+//                    TransformerLevel::Level1,
+//                    /*opset_version*/ 15);
+//}
+//
+//TEST(TransposeOptimizerTests, TestResizeSizeRoi) {
+//  auto build_test_case_1 = [&](ModelTestBuilder& builder) {
+//    auto* input0_arg = MakeInput<float>(builder, {{4, -1, 2, -1}}, {4, 6, 2, 10}, 0.0, 1.0);
+//    auto* const_1 = builder.MakeInitializer<float>({8}, {0.1f, 0.2f, 0.3f, 0.4f, 0.9f, 0.8f, 0.7f, 0.6f});
+//    auto* const_2 = builder.MakeInitializer<int64_t>({4}, {10, 9, 8, 7});
+//    auto* transpose_1_out_0 = builder.MakeIntermediate();
+//    auto* resize_1_out_0 = builder.MakeIntermediate();
+//    auto* transpose_2_out_0 = builder.MakeOutput();
+//    auto empty_arg = NodeArg("", nullptr);
+//
+//    auto& transpose_1 = builder.AddNode("Transpose", {input0_arg}, {transpose_1_out_0});
+//    transpose_1.AddAttribute("perm", std::vector<int64_t>{0, 3, 1, 2});
+//    auto& resize_1 = builder.AddNode("Resize", {transpose_1_out_0, const_1, &empty_arg, const_2}, {resize_1_out_0});
+//    resize_1.AddAttribute("coordinate_transformation_mode", "tf_crop_and_resize");
+//    auto& transpose_2 = builder.AddNode("Transpose", {resize_1_out_0}, {transpose_2_out_0});
+//    transpose_2.AddAttribute("perm", std::vector<int64_t>{0, 2, 3, 1});
+//  };
+//
+//  auto check_optimized_graph_1 = [&](InferenceSessionWrapper& session) {
+//    int transpose_cost = EstimateTransposeCost(session.GetGraph());
+//    EXPECT_EQ(transpose_cost, 0);
+//  };
+//
+//  TransformerTester(build_test_case_1,
+//                    check_optimized_graph_1,
+//                    TransformerLevel::Default,
+//                    TransformerLevel::Level1,
+//                    /*opset_version*/ 15);
+//}
+//
+//TEST(TransposeOptimizerTests, TestResizeRoiScalesZeroRank0) {
+//  auto build_test_case_1 = [&](ModelTestBuilder& builder) {
+//    auto* input = builder.MakeInput<uint8_t>({1, 512, 512, 3},
+//                                             std::numeric_limits<uint8_t>::min(),
+//                                             std::numeric_limits<uint8_t>::max());
+//    auto* resize_in_roi = builder.MakeInitializer<float>({0}, {});
+//    auto* resize_in_scales = builder.MakeInitializer<float>({0}, {});
+//    auto* resize_in_sizes = builder.MakeInitializer<int64_t>({4}, {1, 256, 32, 32});
+//
+//    auto* transpose1_out_transposed = builder.MakeIntermediate();
+//    auto* resize_out_Y = builder.MakeIntermediate();
+//    auto* output = builder.MakeOutput();
+//
+//    auto& transpose_1 = builder.AddNode("Transpose", {input}, {transpose1_out_transposed});
+//    transpose_1.AddAttribute("perm", std::vector<int64_t>{0, 3, 1, 2});
+//    builder.AddNode("Resize",
+//                    {transpose1_out_transposed, resize_in_roi, resize_in_scales, resize_in_sizes},
+//                    {resize_out_Y});
+//    auto& transpose_2 = builder.AddNode("Transpose", {resize_out_Y}, {output});
+//    transpose_2.AddAttribute("perm", std::vector<int64_t>{0, 2, 3, 1});
+//  };
+//
+//  auto check_optimized_graph_1 = [&](InferenceSessionWrapper& session) {
+//    int transpose_cost = EstimateTransposeCost(session.GetGraph());
+//    EXPECT_EQ(transpose_cost, 0);
+//  };
+//
+//  TransformerTester(build_test_case_1,
+//                    check_optimized_graph_1,
+//                    TransformerLevel::Default,
+//                    TransformerLevel::Level1);
+//}
+//
+//TEST(TransposeOptimizerTests, TestResizeNonconst) {
+//  auto build_test_case_1 = [&](ModelTestBuilder& builder) {
+//    auto* input0_arg = MakeInput<float>(builder, {{4, -1, 2, -1}}, {4, 6, 2, 10}, 0.0, 1.0);
+//    auto* input1_arg = MakeInput<float>(builder, {{8}}, {8}, {0.1f, 0.2f, 0.3f, 0.4f, 0.9f, 0.8f, 0.7f, 0.6f});
+//    auto* input2_arg = MakeInput<float>(builder, {{4}}, {4}, {0.3f, 2.5f, 1.0f, 0.7f});
+//    auto* transpose_1_out_0 = builder.MakeIntermediate();
+//    auto* resize_1_out_0 = builder.MakeIntermediate();
+//    auto* transpose_2_out_0 = builder.MakeOutput();
+//
+//    auto& transpose_1 = builder.AddNode("Transpose", {input0_arg}, {transpose_1_out_0});
+//    transpose_1.AddAttribute("perm", std::vector<int64_t>{0, 3, 1, 2});
+//    auto& resize_1 = builder.AddNode("Resize", {transpose_1_out_0, input1_arg, input2_arg}, {resize_1_out_0});
+//    resize_1.AddAttribute("coordinate_transformation_mode", "tf_crop_and_resize");
+//    auto& transpose_2 = builder.AddNode("Transpose", {resize_1_out_0}, {transpose_2_out_0});
+//    transpose_2.AddAttribute("perm", std::vector<int64_t>{0, 2, 3, 1});
+//  };
+//
+//  auto check_optimized_graph_1 = [&](InferenceSessionWrapper& session) {
+//    int transpose_cost = EstimateTransposeCost(session.GetGraph());
+//    EXPECT_EQ(transpose_cost, 0);
+//  };
+//
+//  TransformerTester(build_test_case_1,
+//                    check_optimized_graph_1,
+//                    TransformerLevel::Default,
+//                    TransformerLevel::Level1,
+//                    /*opset_version*/ 11);
+//}
+//
+//TEST(TransposeOptimizerTests, TestResizeNonconstOpset13) {
+//  auto build_test_case_1 = [&](ModelTestBuilder& builder) {
+//    auto* input0_arg = MakeInput<float>(builder, {{4, -1, 2, -1}}, {4, 6, 2, 10}, 0.0, 1.0);
+//    auto* input1_arg = MakeInput<float>(builder, {{8}}, {8}, {0.1f, 0.2f, 0.3f, 0.4f, 0.9f, 0.8f, 0.7f, 0.6f});
+//    auto* input2_arg = MakeInput<float>(builder, {{4}}, {4}, {0.3f, 2.5f, 1.0f, 0.7f});
+//    auto* transpose_1_out_0 = builder.MakeIntermediate();
+//    auto* resize_1_out_0 = builder.MakeIntermediate();
+//    auto* transpose_2_out_0 = builder.MakeOutput();
+//
+//    auto& transpose_1 = builder.AddNode("Transpose", {input0_arg}, {transpose_1_out_0});
+//    transpose_1.AddAttribute("perm", std::vector<int64_t>{0, 3, 1, 2});
+//    auto& resize_1 = builder.AddNode("Resize", {transpose_1_out_0, input1_arg, input2_arg}, {resize_1_out_0});
+//    resize_1.AddAttribute("coordinate_transformation_mode", "tf_crop_and_resize");
+//    auto& transpose_2 = builder.AddNode("Transpose", {resize_1_out_0}, {transpose_2_out_0});
+//    transpose_2.AddAttribute("perm", std::vector<int64_t>{0, 2, 3, 1});
+//  };
+//
+//  auto check_optimized_graph_1 = [&](InferenceSessionWrapper& session) {
+//    int transpose_cost = EstimateTransposeCost(session.GetGraph());
+//    EXPECT_EQ(transpose_cost, 0);
+//  };
+//
+//  TransformerTester(build_test_case_1,
+//                    check_optimized_graph_1,
+//                    TransformerLevel::Default,
+//                    TransformerLevel::Level1,
+//                    /*opset_version*/ 13);
+//}
 
 TEST(TransposeOptimizerTests, TestAdd) {
   auto build_test_case_1 = [&](ModelTestBuilder& builder) {
diff --git a/onnxruntime/test/python/onnxruntime_test_python.py b/onnxruntime/test/python/onnxruntime_test_python.py
index e8005f5270dad..8618431ec77ed 100644
--- a/onnxruntime/test/python/onnxruntime_test_python.py
+++ b/onnxruntime/test/python/onnxruntime_test_python.py
@@ -884,7 +884,19 @@ def test_session_with_ortvalue_input(ortvalue):
 
             # The constructed OrtValue should still be valid after being used in a session
             self.assertTrue(np.array_equal(ortvalue2.numpy(), numpy_arr_input))
-            
+
+    def testOrtValue_ghIssue9799(self):
+        if 'CUDAExecutionProvider' in onnxrt.get_available_providers():
+            session = onnxrt.InferenceSession(get_name("identity_9799.onnx"),
+                                              providers=onnxrt.get_available_providers())
+
+            for seq_length in range(40, 200):
+                inps = np.ones((seq_length, 16, 7, 5, 3, 3)).astype(np.float32)
+                ort_val = onnxrt.OrtValue.ortvalue_from_numpy(inps, 'cuda', 0)
+                upstreams_onnxrt = {'input': ort_val}
+                outs = session.run(output_names=['output'], input_feed=upstreams_onnxrt)[0]
+                self.assertTrue(np.allclose(inps, outs))
+
     def testSparseTensorCooFormat(self):
         cpu_device = onnxrt.OrtDevice.make('cpu', 0)
         shape = [9,9]
diff --git a/onnxruntime/test/python/quantization/test_qdq.py b/onnxruntime/test/python/quantization/test_qdq.py
index 445ff858c0018..d8d4280e37ac6 100644
--- a/onnxruntime/test/python/quantization/test_qdq.py
+++ b/onnxruntime/test/python/quantization/test_qdq.py
@@ -10,7 +10,7 @@
 import onnx
 import numpy as np
 from onnx import helper, TensorProto
-from onnxruntime.quantization import quantize_static, QuantType, QuantFormat
+from onnxruntime.quantization import quantize_static, QuantType, QuantFormat, QuantizationMode, QDQQuantizer
 from op_test_utils import TestDataFeeds, check_model_correctness, check_op_type_count, check_op_type_order
 
 class TestQDQFormat(unittest.TestCase):
@@ -24,6 +24,177 @@ def input_feeds(self, n, name2shape):
         dr = TestDataFeeds(input_data_list)
         return dr
 
+class TestQDQExtraOptions(unittest.TestCase):
+    def test_qdq_extra_options(self):
+        #   (input) 
+        #      |    
+        #     Add 
+        #      |
+        #     ReduceMean 
+        #      |
+        #     Add 
+        #      |
+        #   (output)
+
+        initializers = []
+
+        input_tensor = helper.make_tensor_value_info('L', TensorProto.FLOAT, [5, 5])
+        output_tensor = helper.make_tensor_value_info('O', TensorProto.FLOAT, [5, 5])
+
+        add_weight_data_1 = np.random.normal(0, 0.1, [5, 5]).astype(np.float32)
+        initializers.append(onnx.numpy_helper.from_array(add_weight_data_1, name="M"))
+        add_weight_data_2 = np.random.normal(0, 0.1, [5, 5]).astype(np.float32)
+        initializers.append(onnx.numpy_helper.from_array(add_weight_data_2, name="N"))
+
+        add_node_1 = onnx.helper.make_node('Add', ['L', 'M'], ['P'], name='Add1')
+        reduce_mean_node = onnx.helper.make_node('ReduceMean', ['P'], ['Q'], keepdims=1, name='ReduceMean')
+        add_node_2 = onnx.helper.make_node('Add', ['Q', 'N'], ['O'], name='Add2')
+
+        graph = helper.make_graph([add_node_1, reduce_mean_node, add_node_2], 'QDQ_Test_Finetune', [input_tensor], [output_tensor], initializer=initializers)
+        model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 13)])
+        test_model_path = './test_qdq_finetune.onnx'
+        onnx.save(model, test_model_path)
+
+        compute_range = {
+            'P': [0.1, 0.1],
+            'Q': [0.1, 0.1],
+            'M': [0.1, 0.1],
+            'N': [0.1, 0.1],
+            'L': [0.1, 0.1],
+            'O': [0.1, 0.1],
+        }
+
+        op_types_to_quantize = ['Add']
+
+        mode = QuantizationMode.QLinearOps
+        model = onnx.load_model(test_model_path, False)
+        quantizer = QDQQuantizer(
+            model,
+            True, #per_channel
+            False, #reduce_range
+            mode,
+            True,  #static
+            QuantType.QInt8, #weight_type
+            QuantType.QInt8, #activation_type
+            compute_range,
+            [], #nodes_to_quantize
+            ['Add2'], #nodes_to_exclude
+            op_types_to_quantize,
+            {'ActivationSymmetric' : True, 'AddQDQPairToWeight' : True, 'OpTypesToExcludeOutputQuantizatioin': []}) #extra_options
+        quantizer.quantize_model()
+        qdq_model_path = './test_qdq_finetune_qdq.onnx'
+        quantizer.model.save_model_to_file(qdq_model_path, False)
+
+        # A QDQ pair should be added to Add1's inputs but not to Add2's.
+        # A QDQ pair should also be added on Add1's output tensor.
+        qdq_added_to_node_output_flag = False
+        for node in quantizer.model.nodes():
+            if node.name == 'Add1':
+                for input in node.input:
+                    self.assertTrue("DequantizeLinear" in input)
+                for output in node.output:
+                    self.assertTrue("QuantizeLinear" not in output)
+
+            if node.name == 'Add2':
+                for input in node.input:
+                    self.assertTrue("DequantizeLinear" not in input)
+                for output in node.output:
+                    self.assertTrue("QuantizeLinear" not in output)
+
+            # This QuantizeLinear node should consume Add1's output 'P'
+            if node.name == 'P_QuantizeLinear':
+                qdq_added_to_node_output_flag = True
+                self.assertEqual(node.input[0], 'P')
+
+        self.assertTrue(qdq_added_to_node_output_flag)
+
+
+    def test_qdq_extra_options_2(self):
+        #         (input) 
+        #           |    
+        #          Add 
+        #       /   |   \
+        #  MatMul MatMul MatMul 
+        #     |     |      |
+        # (output)(output)(output)
+
+        initializers = []
+
+        input_tensor = helper.make_tensor_value_info('L', TensorProto.FLOAT, [5, 5])
+        output_tensor1 = helper.make_tensor_value_info('M', TensorProto.FLOAT, [5, 5])
+        output_tensor2 = helper.make_tensor_value_info('N', TensorProto.FLOAT, [5, 5])
+        output_tensor3 = helper.make_tensor_value_info('O', TensorProto.FLOAT, [5, 5])
+
+        add_weight_data = np.random.normal(0, 0.1, [5, 5]).astype(np.float32)
+        initializers.append(onnx.numpy_helper.from_array(add_weight_data, name="P"))
+        matmul_weight_data_1 = np.random.normal(0, 0.1, [5, 5]).astype(np.float32)
+        initializers.append(onnx.numpy_helper.from_array(matmul_weight_data_1, name="Q"))
+        matmul_weight_data_2 = np.random.normal(0, 0.1, [5, 5]).astype(np.float32)
+        initializers.append(onnx.numpy_helper.from_array(matmul_weight_data_2, name="R"))
+        matmul_weight_data_3 = np.random.normal(0, 0.1, [5, 5]).astype(np.float32)
+        initializers.append(onnx.numpy_helper.from_array(matmul_weight_data_2, name="S"))
+
+        add_node = onnx.helper.make_node('Add', ['L', 'P'], ['T'], name='Add')
+        matmul_node_1 = onnx.helper.make_node('MatMul', ['T', 'Q'], ['M'], name='MatMul1')
+        matmul_node_2 = onnx.helper.make_node('MatMul', ['T', 'R'], ['N'], name='MatMul2')
+        matmul_node_3 = onnx.helper.make_node('MatMul', ['T', 'S'], ['O'], name='MatMul3')
+
+        graph = helper.make_graph([add_node, matmul_node_1, matmul_node_2, matmul_node_3], 'QDQ_Test_Finetune_2', [input_tensor], [output_tensor1, output_tensor2, output_tensor3], initializer=initializers)
+        model = helper.make_model(graph, opset_imports=[helper.make_opsetid("", 13)])
+        test_model_path = './test_qdq_finetune_2.onnx'
+        onnx.save(model, test_model_path)
+
+        compute_range = {
+            'L': [0.1, 0.1],
+            'M': [0.1, 0.1],
+            'N': [0.1, 0.1],
+            'O': [0.1, 0.1],
+            'P': [0.1, 0.1],
+            'Q': [0.1, 0.1],
+            'R': [0.1, 0.1],
+            'S': [0.1, 0.1],
+            'T': [0.1, 0.1],
+        }
+
+        op_types_to_quantize = ['Add', 'MatMul']
+
+        mode = QuantizationMode.QLinearOps
+        model = onnx.load_model(test_model_path, False)
+        quantizer = QDQQuantizer(
+            model,
+            True, #per_channel
+            False, #reduce_range
+            mode,
+            True,  #static
+            QuantType.QInt8, #weight_type
+            QuantType.QInt8, #activation_type
+            compute_range,
+            [], #nodes_to_quantize
+            ['Add'], #nodes_to_exclude
+            op_types_to_quantize,
+            {'ActivationSymmetric' : True, 'AddQDQPairToWeight' : True, 'OpTypesToExcludeOutputQuantizatioin': op_types_to_quantize, 'DedicatedQDQPair': True}) #extra_options
+        quantizer.quantize_model()
+        qdq_model_path = './test_qdq_finetune_qdq_2.onnx'
+        quantizer.model.save_model_to_file(qdq_model_path, False)
+
+        # Three dedicated QDQ pairs should be generated, one feeding each MatMul node.
+        # No QDQ pair should be added to the Add node (it is excluded),
+        # and no QDQ pair should be added to any node's output.
+        for node in quantizer.model.nodes():
+            if node.name == 'MatMul1':
+                self.assertTrue("T_DequantizeLinear_1" in node.input)
+            if node.name == 'MatMul2':
+                self.assertTrue("T_DequantizeLinear_2" in node.input)
+            if node.name == 'MatMul3':
+                self.assertTrue("T_DequantizeLinear_3" in node.input)
+            if node.name == 'Add':
+                for input in node.input:
+                    self.assertTrue("DequantizeLinear" not in input)
+
+            # A QDQ pair should not be added to any MatMul's output
+            if node.op_type == 'QuantizeLinear':
+                self.assertTrue(node.input[0] not in ['M_QuantizeLinearInput', 'N_QuantizeLinearInput', 'O_QuantizeLinearInput'])
+
 class TestQDQFormatConv(TestQDQFormat):
     def construct_model_conv(self, output_model_path, input_shape, weight_shape, output_shape, has_bias):
         #    (input)
diff --git a/onnxruntime/test/shared_lib/test_inference.cc b/onnxruntime/test/shared_lib/test_inference.cc
index 0a48df4f0bed3..73e436263c820 100644
--- a/onnxruntime/test/shared_lib/test_inference.cc
+++ b/onnxruntime/test/shared_lib/test_inference.cc
@@ -196,6 +196,8 @@ static constexpr PATH_TYPE PYOP_MULTI_MODEL_URI = TSTR("testdata/pyop_2.onnx");
 static constexpr PATH_TYPE PYOP_KWARG_MODEL_URI = TSTR("testdata/pyop_3.onnx");
 #endif
 
+static constexpr PATH_TYPE RESIZE_AND_CROP_MODEL_URI = TSTR("testdata/crop_and_resize.onnx");
+
 class CApiTestWithProvider : public testing::Test, public ::testing::WithParamInterface<int> {
 };
 
@@ -1883,5 +1885,42 @@ TEST(CApiTest, TestPerSessionCustomThreadPoolHooks) {
   ASSERT_TRUE(custom_join_hook_called == (thread_count - 1) << 1);
 }
 
+// Regression test for the resize transformer issue:
+// https://github.com/microsoft/onnxruntime/issues/9857
+TEST(CApiTest, crop_and_resize) {
+  std::vector<float> input_value_0;
+  input_value_0.resize(2 * 36 * 36 * 3);
+  for (int i = 0; i < 36 * 36 * 3; ++i) {
+    input_value_0[i] = 1.f;
+    input_value_0[i + 36 * 36 * 3] = 2.f;
+  }
+  std::vector<int64_t> input_shape_0{2, 36, 36, 3};
+
+  std::vector<int32_t> input_value_1{1, 0};
+  std::vector<int64_t> input_shape_1{2};
+
+  std::vector<const char*> input_names{"input:0", "input2:0"};
+  Ort::MemoryInfo info("Cpu", OrtDeviceAllocator, 0, OrtMemTypeDefault);
+
+  std::vector<Ort::Value> ort_inputs;
+  ort_inputs.emplace_back(Ort::Value::CreateTensor<float>(info, input_value_0.data(), input_value_0.size(), input_shape_0.data(), input_shape_0.size()));
+  ort_inputs.emplace_back(Ort::Value::CreateTensor<int32_t>(info, input_value_1.data(), input_value_1.size(), input_shape_1.data(), input_shape_1.size()));
+
+  Ort::SessionOptions session_options;
+  Ort::Session session(*ort_env, RESIZE_AND_CROP_MODEL_URI, session_options);
+
+  const char* output_names[] = {"output:0"};
+  std::vector<int64_t> output_shape{2, 20, 20, 3};
+
+  std::vector<Ort::Value> ort_outputs = session.Run(Ort::RunOptions{}, input_names.data(), ort_inputs.data(), ort_inputs.size(), output_names, countof(output_names));
+  ASSERT_EQ(ort_outputs.size(), 1U);
+  const auto& output_0 = ort_outputs[0];
+  ASSERT_TRUE(output_0.IsTensor());
+
+  auto output_type_shape = output_0.GetTensorTypeAndShapeInfo();
+  ASSERT_EQ(ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT, output_type_shape.GetElementType());
+  ASSERT_EQ(output_shape, output_type_shape.GetShape());
+}
+
 }  // namespace TestPerSessionCustomThreadHooks
 #endif
diff --git a/onnxruntime/test/testdata/crop_and_resize.onnx b/onnxruntime/test/testdata/crop_and_resize.onnx
new file mode 100644
index 0000000000000..72c31fcd6a040
Binary files /dev/null and b/onnxruntime/test/testdata/crop_and_resize.onnx differ
diff --git a/onnxruntime/test/testdata/identity_9799.onnx b/onnxruntime/test/testdata/identity_9799.onnx
new file mode 100644
index 0000000000000..c54fa57c6157b
Binary files /dev/null and b/onnxruntime/test/testdata/identity_9799.onnx differ
diff --git a/tools/ci_build/build.py b/tools/ci_build/build.py
index 77dc793ed098c..35a95c2c90e99 100644
--- a/tools/ci_build/build.py
+++ b/tools/ci_build/build.py
@@ -567,6 +567,10 @@ def parse_arguments():
         "--test_external_transformer_example", action='store_true',
         help="run the example external transformer test, mainly used in CI pipeline.")
 
+    parser.add_argument(
+        "--enable_cuda_profiling", action='store_true', help="enable cuda kernel profiling, \
+        cupti library must be added to PATH beforehand.")
+
     return parser.parse_args()
 
 
@@ -817,6 +821,7 @@ def generate_build_tree(cmake_path, source_dir, build_dir, cuda_home, cudnn_home
         "-Donnxruntime_ENABLE_EXTERNAL_CUSTOM_OP_SCHEMAS=" + ("ON" if args.enable_external_custom_op_schemas
                                                               else "OFF"),
         "-Donnxruntime_NVCC_THREADS=" + str(args.parallel),
+        "-Donnxruntime_ENABLE_CUDA_PROFILING=" + ("ON" if args.enable_cuda_profiling else "OFF"),
     ]
     if args.external_graph_transformer_path:
         cmake_args.append("-Donnxruntime_EXTERNAL_TRANSFORMER_SRC_PATH=" + args.external_graph_transformer_path)
diff --git a/tools/ci_build/github/azure-pipelines/linux-gpu-ci-pipeline.yml b/tools/ci_build/github/azure-pipelines/linux-gpu-ci-pipeline.yml
index 0a4c360f5cd48..59cdd99fc7473 100644
--- a/tools/ci_build/github/azure-pipelines/linux-gpu-ci-pipeline.yml
+++ b/tools/ci_build/github/azure-pipelines/linux-gpu-ci-pipeline.yml
@@ -38,6 +38,7 @@ jobs:
               --parallel \
               --build_wheel \
               --enable_onnx_tests --use_cuda --cuda_version=11.4 --cuda_home=/usr/local/cuda-11.4 --cudnn_home=/usr/local/cuda-11.4 \
+              --enable_cuda_profiling \
               --enable_pybind --build_java \
               --cmake_extra_defines CMAKE_CUDA_HOST_COMPILER=/opt/rh/devtoolset-10/root/usr/bin/cc  CMAKE_CUDA_ARCHITECTURES=52
       workingDirectory: $(Build.SourcesDirectory)
diff --git a/tools/ci_build/github/azure-pipelines/win-gpu-ci-pipeline.yml b/tools/ci_build/github/azure-pipelines/win-gpu-ci-pipeline.yml
index 7f73482a7540a..a2d9dc304897c 100644
--- a/tools/ci_build/github/azure-pipelines/win-gpu-ci-pipeline.yml
+++ b/tools/ci_build/github/azure-pipelines/win-gpu-ci-pipeline.yml
@@ -13,7 +13,7 @@ stages:
       strategy:
         matrix:
           cuda:
-            additionalBuildFlags: --build_java --build_nodejs --use_cuda --cuda_version=11.4 --cuda_home="C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.4" --cudnn_home="C:\local\cudnn-11.4-windows-x64-v8.2.2.26\cuda" --cmake_extra_defines CMAKE_CUDA_ARCHITECTURES=52 --gen_doc validate
+            additionalBuildFlags: --build_java --build_nodejs --use_cuda --cuda_version=11.4 --cuda_home="C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.4" --cudnn_home="C:\local\cudnn-11.4-windows-x64-v8.2.2.26\cuda" --cmake_extra_defines CMAKE_CUDA_ARCHITECTURES=52 --gen_doc validate --enable_cuda_profiling
             EnvSetupScript: setup_env_cuda_11.bat
             ORT_EP_NAME: CUDA
           dml:
diff --git a/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_openvino b/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_openvino
index eb46ecebdc320..2c65ed4cb53fb 100644
--- a/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_openvino
+++ b/tools/ci_build/github/linux/docker/Dockerfile.ubuntu_openvino
@@ -12,7 +12,7 @@ RUN apt update && apt install -y libnuma1 ocl-icd-libopencl1 && \
 
 WORKDIR /root
 
-ENV INTEL_OPENVINO_DIR /opt/intel/openvino_${OPENVINO_VERSION}.689
+ENV INTEL_OPENVINO_DIR /opt/intel/openvino_${OPENVINO_VERSION}.752
 ENV LD_LIBRARY_PATH $INTEL_OPENVINO_DIR/deployment_tools/inference_engine/lib/intel64:$INTEL_OPENVINO_DIR/deployment_tools/ngraph/lib:$INTEL_OPENVINO_DIR/deployment_tools/inference_engine/external/tbb/lib:/usr/local/openblas/lib:$LD_LIBRARY_PATH
 ENV InferenceEngine_DIR $INTEL_OPENVINO_DIR/deployment_tools/inference_engine/share
 ENV ngraph_DIR $INTEL_OPENVINO_DIR/deployment_tools/ngraph/cmake
@@ -26,7 +26,7 @@ RUN wget https://apt.repos.intel.com/openvino/2021/GPG-PUB-KEY-INTEL-OPENVINO-20
     cd /etc/apt/sources.list.d && \
     echo "deb https://apt.repos.intel.com/openvino/2021 all main">intel-openvino-2021.list && \
     apt update && \ 
-    apt install -y intel-openvino-dev-ubuntu18-2021.4.689 && \
+    apt install -y intel-openvino-dev-ubuntu18-2021.4.752 && \
     cd ${INTEL_OPENVINO_DIR}/install_dependencies && ./install_openvino_dependencies.sh -y
 
 RUN wget https://github.com/intel/compute-runtime/releases/download/19.41.14441/intel-gmmlib_19.3.2_amd64.deb && \