
Conversation

@peri044 peri044 (Collaborator) commented Aug 7, 2022

Description

Handle dtype mismatches between the operands of elementwise ops by casting mismatched inputs to a common type.
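
For reference, a minimal sketch of the promotion rule this PR adds, distilled from the lint diffs quoted below. The wrapper name promote_elementwise_operands is illustrative only; in the PR this logic lives inline in add_elementwise in core/conversion/converters/converter_util.cpp, and ConversionCtx / castITensor are the project's existing converter utilities:

// Sketch only, not a drop-in patch. Two operand types are treated as
// compatible if they are equal or form the {kFLOAT, kHALF} pair; any other
// mismatch is resolved by casting the operand with the larger DataType enum
// value to the other operand's type (e.g. kINT32 is cast to kFLOAT).
void promote_elementwise_operands(ConversionCtx* ctx, nvinfer1::ITensor*& self, nvinfer1::ITensor*& other) {
  auto fp32_and_fp16 =
      (self->getType() == nvinfer1::DataType::kFLOAT) && (other->getType() == nvinfer1::DataType::kHALF);
  auto fp16_and_fp32 =
      (other->getType() == nvinfer1::DataType::kFLOAT) && (self->getType() == nvinfer1::DataType::kHALF);
  if (!fp32_and_fp16 && !fp16_and_fp32) {
    if (self->getType() > other->getType()) {
      self = castITensor(ctx, self, other->getType());
    } else if (self->getType() < other->getType()) {
      other = castITensor(ctx, other, self->getType());
    }
  }
}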

Type of change

  • Bug fix (non-breaking change which fixes an issue)

Checklist:

  • My code follows the style guidelines of this project (You can use the linters)
  • I have performed a self-review of my own code
  • I have commented my code, particularly in hard-to-understand areas and hacks
  • I have made corresponding changes to the documentation
  • I have added tests to verify my fix or my feature
  • New and existing unit tests pass locally with my changes
  • I have added the relevant labels to my PR so that relevant reviewers are notified

peri044 added 2 commits August 6, 2022 23:34
Signed-off-by: Dheeraj Peri <peri.dheeraj@gmail.com>
Signed-off-by: Dheeraj Peri <peri.dheeraj@gmail.com>
@peri044 peri044 requested a review from narendasan August 7, 2022 06:45
@peri044 peri044 added the component: converters Issues re: Specific op converters label Aug 7, 2022
@github-actions github-actions bot added component: conversion Issues re: Conversion stage component: core Issues re: The core compiler labels Aug 7, 2022
@github-actions github-actions bot requested a review from bowang007 August 7, 2022 06:46
@github-actions github-actions bot left a comment

There are some changes that do not conform to C++ style guidelines:

diff --git a/workspace/core/conversion/evaluators/aten.cpp b/tmp/changes.txt
index 4d8795f..c8c2c00 100644
--- a/workspace/core/conversion/evaluators/aten.cpp
+++ b/tmp/changes.txt
@@ -184,7 +184,7 @@ auto aten_registrations TORCHTRT_UNUSED =

                      int64_t start = 0;
                      auto startIVal = args.at(n->input(1)).IValue();
-                      if(!startIVal->isNone()){
+                      if (!startIVal->isNone()) {
                        start = args.at(n->input(1)).unwrapToInt();
                      }
                      int64_t end = args.at(n->input(2)).unwrapToInt();
diff --git a/workspace/core/conversion/converters/converter_util.cpp b/tmp/changes.txt
index ee8a589..bc6abdd 100644
--- a/workspace/core/conversion/converters/converter_util.cpp
+++ b/tmp/changes.txt
@@ -118,9 +118,11 @@ nvinfer1::ILayer* add_elementwise(
  }

  // Two types are compatible if they are the same type or are both in the set {kFLOAT, kHALF}
-  auto fp32_and_fp16 = (self->getType() == nvinfer1::DataType::kFLOAT) && (other->getType() == nvinfer1::DataType::kHALF);
-  auto fp16_and_fp32 = (other->getType() == nvinfer1::DataType::kFLOAT) && (self->getType() == nvinfer1::DataType::kHALF);
-  if (!fp32_and_fp16 && !fp16_and_fp32){
+  auto fp32_and_fp16 =
+      (self->getType() == nvinfer1::DataType::kFLOAT) && (other->getType() == nvinfer1::DataType::kHALF);
+  auto fp16_and_fp32 =
+      (other->getType() == nvinfer1::DataType::kFLOAT) && (self->getType() == nvinfer1::DataType::kHALF);
+  if (!fp32_and_fp16 && !fp16_and_fp32) {
    if (self->getType() > other->getType()) {
      self = castITensor(ctx, self, other->getType());
    } else if (self->getType() < other->getType()) {
diff --git a/workspace/core/conversion/converters/impl/unary.cpp b/tmp/changes.txt
index a1d03a3..6b0ee2b 100644
--- a/workspace/core/conversion/converters/impl/unary.cpp
+++ b/tmp/changes.txt
@@ -10,45 +10,41 @@ namespace converters {
namespace impl {
namespace {

-
auto abs_registration TORCHTRT_UNUSED = RegisterNodeConversionPatterns().pattern(
-  {"aten::abs (Tensor self) -> Tensor",
-  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-    auto in = args[0].ITensor();
-    bool unary_supported_input = in->getType() == nvinfer1::DataType::kFLOAT
-     || in->getType() == nvinfer1::DataType::kHALF
-     || in->getType() == nvinfer1::DataType::kINT8;
-    if(unary_supported_input){
-      auto unary_layer = ctx->net->addUnary(*in, nvinfer1::UnaryOperation::kABS);
-      TORCHTRT_CHECK(unary_layer, "Unable to create abs layer from node: " << *n);
-      unary_layer->setName(util::node_info(n).c_str());
-      auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], unary_layer->getOutput(0));
-      LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
-      return true;
-    }
-    else{
-      //For types not supported by kABS, use an elementwise implementation abs(x) = max(x, -1 * x)
-      at::Tensor neg_one = torch::full({1}, -1).to(util::TRTDataTypeToScalarType(in->getType()));
-      auto neg_one_const = tensor_to_const(ctx, neg_one);
-      auto neg_layer = add_elementwise(
+    {"aten::abs (Tensor self) -> Tensor", [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+       auto in = args[0].ITensor();
+       bool unary_supported_input = in->getType() == nvinfer1::DataType::kFLOAT ||
+           in->getType() == nvinfer1::DataType::kHALF || in->getType() == nvinfer1::DataType::kINT8;
+       if (unary_supported_input) {
+         auto unary_layer = ctx->net->addUnary(*in, nvinfer1::UnaryOperation::kABS);
+         TORCHTRT_CHECK(unary_layer, "Unable to create abs layer from node: " << *n);
+         unary_layer->setName(util::node_info(n).c_str());
+         auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], unary_layer->getOutput(0));
+         LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
+         return true;
+       } else {
+         // For types not supported by kABS, use an elementwise implementation abs(x) = max(x, -1 * x)
+         at::Tensor neg_one = torch::full({1}, -1).to(util::TRTDataTypeToScalarType(in->getType()));
+         auto neg_one_const = tensor_to_const(ctx, neg_one);
+         auto neg_layer = add_elementwise(
             ctx,
             nvinfer1::ElementWiseOperation::kPROD,
             in,
             neg_one_const,
             util::node_info(n) + std::string("_Negation"));
-      TORCHTRT_CHECK(neg_layer, "Unable to create prod layer from node: " << *n);
-      auto max_layer = add_elementwise(
+         TORCHTRT_CHECK(neg_layer, "Unable to create prod layer from node: " << *n);
+         auto max_layer = add_elementwise(
             ctx,
             nvinfer1::ElementWiseOperation::kMAX,
             in,
             neg_layer->getOutput(0),
             util::node_info(n) + std::string("_Max"));
-      TORCHTRT_CHECK(max_layer, "Unable to create max layer from node: " << *n);
-      auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], max_layer->getOutput(0));
-      LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
-      return true;
-    }
-  }});
+         TORCHTRT_CHECK(max_layer, "Unable to create max layer from node: " << *n);
+         auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], max_layer->getOutput(0));
+         LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
+         return true;
+       }
+     }});

#define convert(unary, trt_type)                                                               \
  auto unary##_registrations TORCHTRT_UNUSED = RegisterNodeConversionPatterns().pattern(       \
diff --git a/workspace/tests/core/conversion/converters/test_element_wise.cpp b/tmp/changes.txt
index 994fb25..540fa12 100644
--- a/workspace/tests/core/conversion/converters/test_element_wise.cpp
+++ b/tmp/changes.txt
@@ -27,8 +27,8 @@ void pointwise_test_helper(
  if (!singleInput) {
    torch_inputs.push_back(at::randint(1, 5, shape2, {at::kCUDA}));
  }
-  if(int_tensors){
-    for(size_t i = 0UL; i < torch_inputs.size(); ++i){
+  if (int_tensors) {
+    for (size_t i = 0UL; i < torch_inputs.size(); ++i) {
      torch_inputs[i] = torch_inputs[i].to(at::kInt);
    }
  }
diff --git a/workspace/tests/core/conversion/converters/test_unary.cpp b/tmp/changes.txt
index a7ab3bb..1d40c3c 100644
--- a/workspace/tests/core/conversion/converters/test_unary.cpp
+++ b/tmp/changes.txt
@@ -1,9 +1,9 @@
#include <string>
-#include "torch/torch.h"
#include "core/compiler.h"
#include "gtest/gtest.h"
#include "tests/util/util.h"
#include "torch/csrc/jit/ir/irparser.h"
+#include "torch/torch.h"

namespace {
std::string gen_test_graph(const std::string& unary) {
@@ -22,7 +22,7 @@ TEST(Converters, ATenAbsIntConvertsCorrectly) {

  auto in = at::tensor({-1, 1, -2, 2, -3, 3}, {at::kCUDA}).to(torch::kInt32);
  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});  
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});

  in = at::clone(in);
  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
diff --git a/workspace/tests/core/conversion/converters/test_select.cpp b/tmp/changes.txt
index 03b6bda..67b760a 100644
--- a/workspace/tests/core/conversion/converters/test_select.cpp
+++ b/tmp/changes.txt
@@ -376,7 +376,7 @@ TEST(Converters, ATenSliceListConvertsCorrectly) {
          %slice : Tensor[] = aten::slice(%list, %1, %2, %3)
          %out.1 : Tensor, %out.2 : Tensor = prim::ListUnpack(%slice)
          return (%out.1, %out.2))IR";
-  
+
  auto g = std::make_shared<torch::jit::Graph>();

  torch::jit::parseIR(graph, g.get());
ERROR: Some files do not conform to style guidelines


@narendasan (Collaborator) commented

@peri044 I have this fix in the collections branch with extra tests, take a look

@github-actions github-actions bot left a comment

There are some changes that do not conform to C++ style guidelines:

diff --git a/workspace/py/torch_tensorrt/csrc/register_tensorrt_classes.cpp b/tmp/changes.txt
index 9db567c..ba07250 100644
--- a/workspace/py/torch_tensorrt/csrc/register_tensorrt_classes.cpp
+++ b/tmp/changes.txt
@@ -26,11 +26,11 @@ void RegisterTRTCompileSpec() {
  static auto TORCHTRT_UNUSED TRTInputSignatureTSRegistration =
      torch::class_<torch_tensorrt::pyapi::InputSignature>("tensorrt", "_InputSignature")
          .def(torch::init<>())
-          .def("_set_signature_ivalue_torchbind",
-                [](const c10::intrusive_ptr<torch_tensorrt::pyapi::InputSignature>& self,
-                  torch::jit::IValue ival) {
-                    self->signature_ivalue = ival;
-                })
+          .def(
+              "_set_signature_ivalue_torchbind",
+              [](const c10::intrusive_ptr<torch_tensorrt::pyapi::InputSignature>& self, torch::jit::IValue ival) {
+                self->signature_ivalue = ival;
+              })
          .def("__str__", &torch_tensorrt::pyapi::InputSignature::to_str);

  ADD_FIELD_GET_SET_REGISTRATION(
diff --git a/workspace/core/conversion/evaluators/aten.cpp b/tmp/changes.txt
index ca9ff4d..aa58dc7 100644
--- a/workspace/core/conversion/evaluators/aten.cpp
+++ b/tmp/changes.txt
@@ -176,7 +176,7 @@ auto aten_registrations TORCHTRT_UNUSED =

                      int64_t start = 0;
                      auto startIVal = args.at(n->input(1)).IValue();
-                      if(!startIVal->isNone()){
+                      if (!startIVal->isNone()) {
                        start = args.at(n->input(1)).unwrapToInt();
                      }
                      int64_t end = args.at(n->input(2)).unwrapToInt();
diff --git a/workspace/core/conversion/evaluators/eval_util.cpp b/tmp/changes.txt
index 742a4f4..c14f9a6 100644
--- a/workspace/core/conversion/evaluators/eval_util.cpp
+++ b/tmp/changes.txt
@@ -20,7 +20,6 @@ int64_t normalizeIndex(int64_t idx, int64_t list_size) {
  return idx;
}

-
// TODO: Switch back to PyTorch canonical implimentation
c10::optional<torch::jit::IValue> toIValue(const torch::jit::Value* v) {
  if (v->node()->kind() != torch::jit::prim::Constant || v->type()->cast<c10::FunctionType>()) {
diff --git a/workspace/core/conversion/evaluators/prim.cpp b/tmp/changes.txt
index 338c427..244c3e8 100644
--- a/workspace/core/conversion/evaluators/prim.cpp
+++ b/tmp/changes.txt
@@ -260,40 +260,39 @@ auto prim_registrations =
                    },
                    EvalOptions().validSchemas({"prim::shape(Tensor a) -> (int[])"})})
        .evaluator({torch::jit::prim::TupleConstruct,
-                  [](const torch::jit::Node* n, kwargs& args) -> c10::optional<torch::jit::IValue> {
-                    auto num_inputs = n->inputs().size();
-                    c10::IValue tuple = c10::ivalue::Tuple::create();
-                    switch (num_inputs) {
-                      case 0:
-                        tuple = c10::ivalue::Tuple::create();
-                        break;
-                      case 1:
-                        tuple = c10::ivalue::Tuple::create(std::move((*args.at(n->input(0)).IValue())));
-                        break;
-                      case 2: {
-                        tuple = c10::ivalue::Tuple::create(
-                            std::move(*(args.at(n->input(0)).IValue())),
-                            std::move(*(args.at(n->input(1)).IValue())));
-                        break;
-                      }
-                      case 3: {
-                        tuple = c10::ivalue::Tuple::create(
-                            std::move(*(args.at(n->input(0)).IValue())),
-                            std::move(*(args.at(n->input(1)).IValue())),
-                            std::move(*(args.at(n->input(2)).IValue())));
-                        break;
-                      }
-                      default: {
-                        std::vector<c10::IValue> elems;
-                        for (size_t i = 0; i < num_inputs; i++) {
-                          elems.push_back(*(args.at(n->input(i)).IValue()));
+                    [](const torch::jit::Node* n, kwargs& args) -> c10::optional<torch::jit::IValue> {
+                      auto num_inputs = n->inputs().size();
+                      c10::IValue tuple = c10::ivalue::Tuple::create();
+                      switch (num_inputs) {
+                        case 0:
+                          tuple = c10::ivalue::Tuple::create();
+                          break;
+                        case 1:
+                          tuple = c10::ivalue::Tuple::create(std::move((*args.at(n->input(0)).IValue())));
+                          break;
+                        case 2: {
+                          tuple = c10::ivalue::Tuple::create(
+                              std::move(*(args.at(n->input(0)).IValue())), std::move(*(args.at(n->input(1)).IValue())));
+                          break;
+                        }
+                        case 3: {
+                          tuple = c10::ivalue::Tuple::create(
+                              std::move(*(args.at(n->input(0)).IValue())),
+                              std::move(*(args.at(n->input(1)).IValue())),
+                              std::move(*(args.at(n->input(2)).IValue())));
+                          break;
+                        }
+                        default: {
+                          std::vector<c10::IValue> elems;
+                          for (size_t i = 0; i < num_inputs; i++) {
+                            elems.push_back(*(args.at(n->input(i)).IValue()));
+                          }
+                          tuple = c10::ivalue::Tuple::create(std::move(elems));
+                          break;
                        }
-                        tuple = c10::ivalue::Tuple::create(std::move(elems));
-                        break;
                      }
-                    }
-                    return c10::optional<torch::jit::IValue>(std::move(tuple));
-                  }})
+                      return c10::optional<torch::jit::IValue>(std::move(tuple));
+                    }})
        .evaluator({torch::jit::prim::TupleIndex,
                    [](const torch::jit::Node* n, kwargs& args) -> c10::optional<torch::jit::IValue> {
                      // Outputs is an IValue which has list of tensors which can be found in ctx->evaluated_value_map
diff --git a/workspace/core/conversion/converters/converter_util.cpp b/tmp/changes.txt
index 7bca1a9..9ef39c0 100644
--- a/workspace/core/conversion/converters/converter_util.cpp
+++ b/tmp/changes.txt
@@ -125,9 +125,11 @@ nvinfer1::ILayer* add_elementwise(
  }

  // Two types are compatible if they are the same type or are both in the set {kFLOAT, kHALF}
-  auto fp32_and_fp16 = (self->getType() == nvinfer1::DataType::kFLOAT) && (other->getType() == nvinfer1::DataType::kHALF);
-  auto fp16_and_fp32 = (other->getType() == nvinfer1::DataType::kFLOAT) && (self->getType() == nvinfer1::DataType::kHALF);
-  if (!fp32_and_fp16 && !fp16_and_fp32){
+  auto fp32_and_fp16 =
+      (self->getType() == nvinfer1::DataType::kFLOAT) && (other->getType() == nvinfer1::DataType::kHALF);
+  auto fp16_and_fp32 =
+      (other->getType() == nvinfer1::DataType::kFLOAT) && (self->getType() == nvinfer1::DataType::kHALF);
+  if (!fp32_and_fp16 && !fp16_and_fp32) {
    if (self->getType() > other->getType()) {
      self = castITensor(ctx, self, other->getType());
    } else if (self->getType() < other->getType()) {
diff --git a/workspace/core/conversion/converters/impl/unary.cpp b/tmp/changes.txt
index a1d03a3..6b0ee2b 100644
--- a/workspace/core/conversion/converters/impl/unary.cpp
+++ b/tmp/changes.txt
@@ -10,45 +10,41 @@ namespace converters {
namespace impl {
namespace {

-
auto abs_registration TORCHTRT_UNUSED = RegisterNodeConversionPatterns().pattern(
-  {"aten::abs (Tensor self) -> Tensor",
-  [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
-    auto in = args[0].ITensor();
-    bool unary_supported_input = in->getType() == nvinfer1::DataType::kFLOAT
-     || in->getType() == nvinfer1::DataType::kHALF
-     || in->getType() == nvinfer1::DataType::kINT8;
-    if(unary_supported_input){
-      auto unary_layer = ctx->net->addUnary(*in, nvinfer1::UnaryOperation::kABS);
-      TORCHTRT_CHECK(unary_layer, "Unable to create abs layer from node: " << *n);
-      unary_layer->setName(util::node_info(n).c_str());
-      auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], unary_layer->getOutput(0));
-      LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
-      return true;
-    }
-    else{
-      //For types not supported by kABS, use an elementwise implementation abs(x) = max(x, -1 * x)
-      at::Tensor neg_one = torch::full({1}, -1).to(util::TRTDataTypeToScalarType(in->getType()));
-      auto neg_one_const = tensor_to_const(ctx, neg_one);
-      auto neg_layer = add_elementwise(
+    {"aten::abs (Tensor self) -> Tensor", [](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
+       auto in = args[0].ITensor();
+       bool unary_supported_input = in->getType() == nvinfer1::DataType::kFLOAT ||
+           in->getType() == nvinfer1::DataType::kHALF || in->getType() == nvinfer1::DataType::kINT8;
+       if (unary_supported_input) {
+         auto unary_layer = ctx->net->addUnary(*in, nvinfer1::UnaryOperation::kABS);
+         TORCHTRT_CHECK(unary_layer, "Unable to create abs layer from node: " << *n);
+         unary_layer->setName(util::node_info(n).c_str());
+         auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], unary_layer->getOutput(0));
+         LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
+         return true;
+       } else {
+         // For types not supported by kABS, use an elementwise implementation abs(x) = max(x, -1 * x)
+         at::Tensor neg_one = torch::full({1}, -1).to(util::TRTDataTypeToScalarType(in->getType()));
+         auto neg_one_const = tensor_to_const(ctx, neg_one);
+         auto neg_layer = add_elementwise(
             ctx,
             nvinfer1::ElementWiseOperation::kPROD,
             in,
             neg_one_const,
             util::node_info(n) + std::string("_Negation"));
-      TORCHTRT_CHECK(neg_layer, "Unable to create prod layer from node: " << *n);
-      auto max_layer = add_elementwise(
+         TORCHTRT_CHECK(neg_layer, "Unable to create prod layer from node: " << *n);
+         auto max_layer = add_elementwise(
             ctx,
             nvinfer1::ElementWiseOperation::kMAX,
             in,
             neg_layer->getOutput(0),
             util::node_info(n) + std::string("_Max"));
-      TORCHTRT_CHECK(max_layer, "Unable to create max layer from node: " << *n);
-      auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], max_layer->getOutput(0));
-      LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
-      return true;
-    }
-  }});
+         TORCHTRT_CHECK(max_layer, "Unable to create max layer from node: " << *n);
+         auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], max_layer->getOutput(0));
+         LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
+         return true;
+       }
+     }});

#define convert(unary, trt_type)                                                               \
  auto unary##_registrations TORCHTRT_UNUSED = RegisterNodeConversionPatterns().pattern(       \
diff --git a/workspace/tests/core/conversion/converters/test_element_wise.cpp b/tmp/changes.txt
index 939c9b7..db60896 100644
--- a/workspace/tests/core/conversion/converters/test_element_wise.cpp
+++ b/tmp/changes.txt
@@ -30,14 +30,15 @@ void pointwise_test_helper(
    torch_inputs.push_back(at::randint(1, 5, shape2, {at::kCUDA}));
  }

-  TORCHTRT_CHECK(!((int_tensors && (float_int_tensors || int_float_tensors)) || (float_int_tensors && int_float_tensors)),
-    "Invalid test configuration, only one of int_tensors, float_int_tensors, int_float_tensors can be true");
+  TORCHTRT_CHECK(
+      !((int_tensors && (float_int_tensors || int_float_tensors)) || (float_int_tensors && int_float_tensors)),
+      "Invalid test configuration, only one of int_tensors, float_int_tensors, int_float_tensors can be true");

-  if(int_tensors){
-    for(size_t i = 0UL; i < torch_inputs.size(); ++i){
+  if (int_tensors) {
+    for (size_t i = 0UL; i < torch_inputs.size(); ++i) {
      torch_inputs[i] = torch_inputs[i].to(at::kInt);
    }
-  } else if(float_int_tensors) {
+  } else if (float_int_tensors) {
    TORCHTRT_CHECK(!singleInput, "float_int_tensors tests require two inputs");
    torch_inputs[0] = torch_inputs[0].to(at::kFloat);
    torch_inputs[1] = torch_inputs[1].to(at::kInt);
diff --git a/workspace/tests/core/conversion/converters/test_unary.cpp b/tmp/changes.txt
index a7ab3bb..1d40c3c 100644
--- a/workspace/tests/core/conversion/converters/test_unary.cpp
+++ b/tmp/changes.txt
@@ -1,9 +1,9 @@
#include <string>
-#include "torch/torch.h"
#include "core/compiler.h"
#include "gtest/gtest.h"
#include "tests/util/util.h"
#include "torch/csrc/jit/ir/irparser.h"
+#include "torch/torch.h"

namespace {
std::string gen_test_graph(const std::string& unary) {
@@ -22,7 +22,7 @@ TEST(Converters, ATenAbsIntConvertsCorrectly) {

  auto in = at::tensor({-1, 1, -2, 2, -3, 3}, {at::kCUDA}).to(torch::kInt32);
  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
-  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});  
+  auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {in});

  in = at::clone(in);
  params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
diff --git a/workspace/tests/core/conversion/converters/test_select.cpp b/tmp/changes.txt
index 03b6bda..67b760a 100644
--- a/workspace/tests/core/conversion/converters/test_select.cpp
+++ b/tmp/changes.txt
@@ -376,7 +376,7 @@ TEST(Converters, ATenSliceListConvertsCorrectly) {
          %slice : Tensor[] = aten::slice(%list, %1, %2, %3)
          %out.1 : Tensor, %out.2 : Tensor = prim::ListUnpack(%slice)
          return (%out.1, %out.2))IR";
-  
+
  auto g = std::make_shared<torch::jit::Graph>();

  torch::jit::parseIR(graph, g.get());
diff --git a/workspace/tests/cpp/test_collections.cpp b/tmp/changes.txt
index 31495a4..d01665a 100644
--- a/workspace/tests/cpp/test_collections.cpp
+++ b/tmp/changes.txt
@@ -135,7 +135,7 @@ TEST(CppAPITests, TestCollectionListInput) {

  auto compile_settings = torch_tensorrt::ts::CompileSpec(complex_input_shape2);
  compile_settings.min_block_size = 1;
-  //compile_settings.torch_executed_ops.push_back("aten::__getitem__");
+  // compile_settings.torch_executed_ops.push_back("aten::__getitem__");

  // // FP16 execution
  compile_settings.enabled_precisions = {torch::kHalf};
diff --git a/workspace/cpp/src/compile_spec.cpp b/tmp/changes.txt
index 8c7bb8b..cfbc228 100644
--- a/workspace/cpp/src/compile_spec.cpp
+++ b/tmp/changes.txt
@@ -69,22 +69,24 @@ torchtrt::core::CompileSpec init_compile_spec(CompileSpec& external) {
    return internal;
  } else {
    torch::jit::IValue converted_input_signature;
-    LOG_WARNING( "Input signature parsing is an experimental feature, behavior and APIs may change");
+    LOG_WARNING("Input signature parsing is an experimental feature, behavior and APIs may change");
    to_internal_input_signature(external.graph_inputs.input_signature, converted_input_signature);
    torchtrt::core::CompileSpec internal(converted_input_signature);

-    TORCHTRT_CHECK(!external.require_full_compilation, \
-      "Grouped inputs currently requires partial compilation to be enabled, \
+    TORCHTRT_CHECK(
+        !external.require_full_compilation,
+        "Grouped inputs currently requires partial compilation to be enabled, \
      this restriction will be relaxed in a future release");

    LOG_DEBUG("Grouped inputs currently requires additional settings to enable the feature");
-    LOG_DEBUG("Adding the following ops to torch_executed_ops:" \
-       << std::endl << "  - aten::__getitem__" \
-       << std::endl << "  - prim::ListConstruct" \
-       << std::endl << "  - prim::ListUnpack" \
-       << std::endl << "  - prim::TupleIndex" \
-       << std::endl << "  - prim::TupleConstruct" \
-       << std::endl << "  - prim::TupleUnpack");
+    LOG_DEBUG(
+        "Adding the following ops to torch_executed_ops:" << std::endl
+                                                          << "  - aten::__getitem__" << std::endl
+                                                          << "  - prim::ListConstruct" << std::endl
+                                                          << "  - prim::ListUnpack" << std::endl
+                                                          << "  - prim::TupleIndex" << std::endl
+                                                          << "  - prim::TupleConstruct" << std::endl
+                                                          << "  - prim::TupleUnpack");
    external.torch_executed_ops.push_back("aten::__getitem__");
    external.torch_executed_ops.push_back("prim::ListConstruct");
    external.torch_executed_ops.push_back("prim::ListUnpack");
ERROR: Some files do not conform to style guidelines

Signed-off-by: Dheeraj Peri <peri.dheeraj@gmail.com>
@peri044 peri044 (Collaborator, Author) commented Aug 9, 2022

@narendasan Rebased with master and made the modifications as discussed in the collections PR

@github-actions github-actions bot left a comment

There are some changes that do not conform to C++ style guidelines:

diff --git a/workspace/core/conversion/converters/converter_util.cpp b/tmp/changes.txt
index 68bfa93..fc02dcc 100644
--- a/workspace/core/conversion/converters/converter_util.cpp
+++ b/tmp/changes.txt
@@ -118,9 +118,11 @@ nvinfer1::ILayer* add_elementwise(
  }

  // Two types are compatible if they are the same type or are both in the set {kFLOAT, kHALF}
-  auto fp32_and_fp16 = (self->getType() == nvinfer1::DataType::kFLOAT) && (other->getType() == nvinfer1::DataType::kHALF);
-  auto fp16_and_fp32 = (other->getType() == nvinfer1::DataType::kFLOAT) && (self->getType() == nvinfer1::DataType::kHALF);
-  if (!fp32_and_fp16 && !fp16_and_fp32){
+  auto fp32_and_fp16 =
+      (self->getType() == nvinfer1::DataType::kFLOAT) && (other->getType() == nvinfer1::DataType::kHALF);
+  auto fp16_and_fp32 =
+      (other->getType() == nvinfer1::DataType::kFLOAT) && (self->getType() == nvinfer1::DataType::kHALF);
+  if (!fp32_and_fp16 && !fp16_and_fp32) {
    if (self->getType() > other->getType()) {
      LOG_DEBUG("Type mismatch in node : " << name << ", casting self to " << other->getType());
      self = castITensor(ctx, self, other->getType());
ERROR: Some files do not conform to style guidelines

@github-actions github-actions bot left a comment

Code conforms to Python style guidelines

Review thread on core/conversion/converters/converter_util.cpp:

  }

  // Two types are compatible if they are the same type or are both in the set {kFLOAT, kHALF}
  auto fp32_and_fp16 = (self->getType() == nvinfer1::DataType::kFLOAT) && (other->getType() == nvinfer1::DataType::kHALF);
A collaborator commented:

This does not handle INT32 which was causing tests to fail earlier
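
For context, the failing configuration was a two-input elementwise op fed one float and one int32 tensor. A hedged sketch of the repro, modeled on the float_int_tensors path added to pointwise_test_helper in the diff above:

// Sketch of the mixed float/int32 case (assumes the test helpers in
// tests/core/conversion/converters): one kFloat and one kInt input to an
// elementwise converter such as aten::add.
auto a = at::randint(1, 5, {5}, {at::kCUDA}).to(at::kFloat);
auto b = at::randint(1, 5, {5}, {at::kCUDA}).to(at::kInt);
// The {kFLOAT, kHALF} compatibility check alone does not cover this pair;
// under the enum-ordering rule in the diffs above, the int32 operand is cast
// to the float operand's type before the TensorRT layer is created.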

@narendasan narendasan added the release: v1.2 Tagged to be included in v1.2 label Aug 9, 2022
@peri044 peri044 closed this Aug 9, 2022
Labels: cla signed · component: conversion (Issues re: Conversion stage) · component: converters (Issues re: Specific op converters) · component: core (Issues re: The core compiler) · release: v1.2 (Tagged to be included in v1.2)