diff --git a/core/conversion/evaluators/aten.cpp b/core/conversion/evaluators/aten.cpp
index 8cb6cf2dd6..8b5b6e0af3 100644
--- a/core/conversion/evaluators/aten.cpp
+++ b/core/conversion/evaluators/aten.cpp
@@ -157,6 +157,63 @@ auto aten_registrations TORCHTRT_UNUSED =
                auto out_tensor = torch::ones(args.at(n->input(0)).unwrapToIntList().vec(), options);
                return out_tensor;
              }})
+        .evaluator(
+            {c10::Symbol::fromQualString("aten::new_zeros"),
+             // aten::new_zeros(Tensor self, int[] size, *, int? dtype=None, int? layout=None,
+             //                 Device? device=None, bool? pin_memory=None) -> (Tensor)
+             [](ConversionCtx* ctx, const torch::jit::Node* n, kwargs& args) -> c10::optional<torch::jit::IValue> {
+               auto tensor_info = newTensorImplementation(n, args);
+               return torch::zeros(tensor_info.first, tensor_info.second);
+             }})
+        .evaluator(
+            {c10::Symbol::fromQualString("aten::new_ones"),
+             // aten::new_ones(Tensor self, int[] size, *, int? dtype=None, int? layout=None,
+             //                Device? device=None, bool? pin_memory=None) -> (Tensor)
+             [](ConversionCtx* ctx, const torch::jit::Node* n, kwargs& args) -> c10::optional<torch::jit::IValue> {
+               auto tensor_info = newTensorImplementation(n, args);
+               return torch::ones(tensor_info.first, tensor_info.second);
+             }})
+        .evaluator(
+            {c10::Symbol::fromQualString("aten::zeros_like"),
+             // aten::zeros_like(Tensor self, *, int? dtype=None, int? layout=None,
+             //                  Device? device=None, bool? pin_memory=None, int? memory_format=None) -> (Tensor)
+             [](ConversionCtx* ctx, const torch::jit::Node* n, kwargs& args) -> c10::optional<torch::jit::IValue> {
+               return newTensorLikeImplementation(
+                   ctx, n, args, [](const std::vector<int64_t>& dims, const torch::TensorOptions& options) {
+                     return torch::zeros(dims, options);
+                   });
+             }})
+        .evaluator(
+            {c10::Symbol::fromQualString("aten::ones_like"),
+             // aten::ones_like(Tensor self, *, int? dtype=None, int? layout=None,
+             //                 Device? device=None, bool? pin_memory=None, int? memory_format=None) -> (Tensor)
+             [](ConversionCtx* ctx, const torch::jit::Node* n, kwargs& args) -> c10::optional<torch::jit::IValue> {
+               return newTensorLikeImplementation(
+                   ctx, n, args, [](const std::vector<int64_t>& dims, const torch::TensorOptions& options) {
+                     return torch::ones(dims, options);
+                   });
+             }})
+        .evaluator(
+            {c10::Symbol::fromQualString("aten::fill_"),
+             // aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> (Tensor(a!))
+             [](ConversionCtx* ctx, const torch::jit::Node* n, kwargs& args) -> c10::optional<torch::jit::IValue> {
+               auto tensor_var = args.at(n->input(0));
+               auto options = torch::TensorOptions().layout(torch::kStrided).device(torch::kCUDA);
+               std::vector<int64_t> dims;
+               if (tensor_var.isITensor()) {
+                 auto tensor = tensor_var.ITensor();
+                 auto dtype = util::TRTDataTypeToScalarType(tensor->getType());
+                 options = options.dtype(dtype);
+                 dims = util::toVec(tensor->getDimensions());
+               } else {
+                 auto tensor = tensor_var.unwrapToTensor();
+                 options = options.dtype(tensor.dtype());
+                 dims = tensor.sizes().vec();
+               }
+               auto scalar_value = args.at(n->input(1)).unwrapToScalar();
+               auto out_tensor = torch::full(dims, scalar_value, options);
+               return out_tensor;
+             }})
         .evaluator(
             {c10::Symbol::fromQualString("aten::full"),
              // aten::full(int[] size, Scalar fill_value, *, int? dtype=None, int? layout=None,
diff --git a/core/conversion/evaluators/eval_util.cpp b/core/conversion/evaluators/eval_util.cpp
index 0a0b97cfe1..71b6de9eb2 100644
--- a/core/conversion/evaluators/eval_util.cpp
+++ b/core/conversion/evaluators/eval_util.cpp
@@ -367,6 +367,77 @@ at::Tensor createTensorFromList(
   return tensor;
 }
 
+std::pair<std::vector<int64_t>, torch::TensorOptions> newTensorImplementation(const torch::jit::Node* n, kwargs& args) {
+  auto options = torch::TensorOptions().layout(torch::kStrided).device(torch::kCUDA);
+
+  // Input 2 is the dtype
+  if (!args.at(n->input(2)).isNone() && !args.at(n->input(2)).IValue()->isNone()) {
+    options = options.dtype(c10::ScalarType(args.at(n->input(2)).unwrapToInt()));
+  } else {
+    auto tensor_var = args.at(n->input(0));
+    if (tensor_var.isITensor()) {
+      auto tensor = tensor_var.ITensor();
+      options = options.dtype(scalarTypeToTypeMeta(util::TRTDataTypeToScalarType(tensor->getType())));
+    } else {
+      auto tensor = tensor_var.unwrapToTensor();
+      options = options.dtype(tensor.dtype());
+    }
+  }
+  return std::make_pair(args.at(n->input(1)).unwrapToIntList().vec(), options);
+}
+
+c10::optional<torch::jit::IValue> newTensorLikeImplementation(
+    ConversionCtx* ctx,
+    const torch::jit::Node* n,
+    kwargs& args,
+    const std::function<torch::Tensor(const std::vector<int64_t>&, const torch::TensorOptions&)>& tensor_builder) {
+  auto options = torch::TensorOptions().layout(torch::kStrided).device(torch::kCUDA);
+  auto tensor_var = args.at(n->input(0));
+
+  if (tensor_var.isITensor()) {
+    auto tensor = tensor_var.ITensor();
+    auto dtype = util::TRTDataTypeToScalarType(tensor->getType());
+    options = options.dtype(dtype);
+  } else {
+    auto tensor = tensor_var.unwrapToTensor();
+    options = options.dtype(tensor.dtype());
+  }
+
+  // Input 1 is the dtype
+  if (!args.at(n->input(1)).isNone() && !args.at(n->input(1)).IValue()->isNone()) {
+    options = options.dtype(c10::ScalarType(args.at(n->input(1)).unwrapToInt()));
+  }
+  std::vector<int64_t> tensor_dims;
+  if (tensor_var.isITensor()) {
+    auto tensor = tensor_var.ITensor();
+    tensor_dims = util::toVec(tensor->getDimensions());
+  } else {
+    auto tensor = tensor_var.unwrapToTensor();
+    tensor_dims = tensor.sizes().vec();
+  }
+  if (ctx->settings.allow_shape_tensors && ctx->input_is_dynamic) {
+    auto self = args.at(n->input(0)).ITensorOrFreeze(ctx);
+    std::vector<int64_t> dims_vec(self->getDimensions().nbDims, 1);
+    auto constant = tensor_builder(dims_vec, options);
+    auto constant_itensor = converters::tensor_to_const(ctx, constant);
+    // broadcast constant to output shape
+    std::vector<int64_t> start_vec(self->getDimensions().nbDims, 0);
+    auto start_offset = util::toDims(c10::IntArrayRef(start_vec));
+    auto shape_layer = ctx->net->addShape(*self);
+    TORCHTRT_CHECK(shape_layer, "Unable to create shape layer from node: " << *n);
+    shape_layer->setName((util::node_info(n) + "_shape").c_str());
+    // slice implements expand
+    auto slice_layer = ctx->net->addSlice(*constant_itensor, start_offset, self->getDimensions(), start_offset);
+    TORCHTRT_CHECK(slice_layer, "Unable to create slice layer from node: " << *n);
+    slice_layer->setInput(2, *shape_layer->getOutput(0));
+    slice_layer->setName((util::node_info(n) + "_slice").c_str());
+    auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], slice_layer->getOutput(0));
+    LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
+    return {};
+  }
+  return tensor_builder(tensor_dims, options);
+}
+
 } // namespace evaluators
 } // namespace conversion
 } // namespace core
diff --git a/core/conversion/evaluators/eval_util.h b/core/conversion/evaluators/eval_util.h
index 5d0f050981..d2b2569eb9 100644
--- a/core/conversion/evaluators/eval_util.h
+++ b/core/conversion/evaluators/eval_util.h
@@ -2,6 +2,7 @@
 
 #include "core/conversion/evaluators/evaluators.h"
 #include "torch/csrc/jit/ir/ir.h"
+#include "torch/torch.h"
 
 namespace torch_tensorrt {
 namespace core {
@@ -26,6 +27,13 @@ int64_t normalizeIndex(int64_t idx, int64_t list_size);
 
 at::Tensor scalar_to_tensor(const at::Scalar& s, const at::Device device = at::kCPU);
 
+std::pair<std::vector<int64_t>, torch::TensorOptions> newTensorImplementation(const torch::jit::Node* n, kwargs& args);
+c10::optional<torch::jit::IValue> newTensorLikeImplementation(
+    ConversionCtx* ctx,
+    const torch::jit::Node* n,
+    kwargs& args,
+    const std::function<torch::Tensor(const std::vector<int64_t>&, const torch::TensorOptions&)>& tensor_builder);
+
 } // namespace evaluators
 } // namespace conversion
 } // namespace core
diff --git a/tests/core/conversion/evaluators/test_aten_evaluators.cpp b/tests/core/conversion/evaluators/test_aten_evaluators.cpp
index c21f7b5461..6cf7319b18 100644
--- a/tests/core/conversion/evaluators/test_aten_evaluators.cpp
+++ b/tests/core/conversion/evaluators/test_aten_evaluators.cpp
@@ -207,6 +207,196 @@ TEST(Evaluators, ZerosDataTypeEvaluatesCorrectly) {
   ASSERT_TRUE(at::equal(jit_results[0].toTensor().to(at::kCUDA), trt_results[0].toTensor()));
 }
 
+TEST(Evaluators, NewZerosEvaluatesCorrectly) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+        %2 : None = prim::Constant() # :0:0
+        %3 : int[] = aten::size(%x.1) # :7:9
+        %z.1 : Tensor = aten::new_zeros(%x.1, %3, %2, %2, %2, %2)
+        return (%z.1))IR";
+
+  auto in = at::randint(1, 10, {1, 5, 5, 5}, {at::kCUDA});
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto jit_results = torch_tensorrt::tests::util::EvaluateGraphJIT(g, {in});
+  auto trt_results = torch_tensorrt::tests::util::EvaluateGraph(g->block(), {in});
+
+  ASSERT_TRUE(at::equal(jit_results[0].toTensor().to(at::kCUDA), trt_results[0].toTensor()));
+}
+
+TEST(Evaluators, NewZerosDataTypeEvaluatesCorrectly) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+        %2 : int = prim::Constant[value=5]() # :0:0 (Float16)
+        %3 : None = prim::Constant() # :0:0
+        %4 : int[] = aten::size(%x.1) # :7:9
+        %z.1 : Tensor = aten::new_zeros(%x.1, %4, %2, %3, %3, %3)
+        return (%z.1))IR";
+
+  auto in = at::randint(1, 10, {1, 5, 5, 5}, {at::kCUDA});
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto jit_results = torch_tensorrt::tests::util::EvaluateGraphJIT(g, {in});
+  auto trt_results = torch_tensorrt::tests::util::EvaluateGraph(g->block(), {in});
+
+  ASSERT_TRUE(at::equal(jit_results[0].toTensor().to(at::kCUDA), trt_results[0].toTensor()));
+}
+
+TEST(Evaluators, NewOnesEvaluatesCorrectly) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+        %2 : None = prim::Constant() # :0:0
+        %3 : int[] = aten::size(%x.1) # :7:9
+        %z.1 : Tensor = aten::new_ones(%x.1, %3, %2, %2, %2, %2)
+        return (%z.1))IR";
+
+  auto in = at::randint(1, 10, {1, 5, 5, 5}, {at::kCUDA});
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto jit_results = torch_tensorrt::tests::util::EvaluateGraphJIT(g, {in});
+  auto trt_results = torch_tensorrt::tests::util::EvaluateGraph(g->block(), {in});
+
+  ASSERT_TRUE(at::equal(jit_results[0].toTensor().to(at::kCUDA), trt_results[0].toTensor()));
+}
+
+TEST(Evaluators, NewOnesDataTypeEvaluatesCorrectly) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+        %2 : int = prim::Constant[value=5]() # :0:0 (Float16)
+        %3 : None = prim::Constant() # :0:0
+        %4 : int[] = aten::size(%x.1) # :7:9
+        %z.1 : Tensor = aten::new_ones(%x.1, %4, %2, %3, %3, %3)
+        return (%z.1))IR";
+
+  auto in = at::randint(1, 10, {1, 5, 5, 5}, {at::kCUDA});
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto jit_results = torch_tensorrt::tests::util::EvaluateGraphJIT(g, {in});
+  auto trt_results = torch_tensorrt::tests::util::EvaluateGraph(g->block(), {in});
+
+  ASSERT_TRUE(at::equal(jit_results[0].toTensor().to(at::kCUDA), trt_results[0].toTensor()));
+}
+
+TEST(Evaluators, ZerosLikeEvaluatesCorrectly) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+        %2 : None = prim::Constant() # :0:0
+        %z.1 : Tensor = aten::zeros_like(%x.1, %2, %2, %2, %2, %2)
+        return (%z.1))IR";
+
+  auto in = at::randint(1, 10, {1, 5, 5, 5}, {at::kCUDA});
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto jit_results = torch_tensorrt::tests::util::EvaluateGraphJIT(g, {in});
+  auto trt_results = torch_tensorrt::tests::util::EvaluateGraph(g->block(), {in});
+
+  ASSERT_TRUE(at::equal(jit_results[0].toTensor().to(at::kCUDA), trt_results[0].toTensor()));
+}
+
+TEST(Evaluators, ZerosLikeDataTypeEvaluatesCorrectly) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+        %2 : int = prim::Constant[value=5]() # :0:0 (Float16)
+        %3 : None = prim::Constant()
+        %z.1 : Tensor = aten::zeros_like(%x.1, %2, %3, %3, %3, %3)
+        return (%z.1))IR";
+
+  auto in = at::randint(1, 10, {1, 5, 5, 5}, {at::kCUDA});
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto jit_results = torch_tensorrt::tests::util::EvaluateGraphJIT(g, {in});
+  auto trt_results = torch_tensorrt::tests::util::EvaluateGraph(g->block(), {in});
+
+  ASSERT_TRUE(at::equal(jit_results[0].toTensor().to(at::kCUDA), trt_results[0].toTensor()));
+}
+
+TEST(Evaluators, ZerosLikeDynamic) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+        %2 : int = prim::Constant[value=5]() # :0:0 (Float16)
+        %3 : None = prim::Constant()
+        %z.1 : Tensor = aten::zeros_like(%x.1, %2, %3, %3, %3, %3)
+        return (%z.1))IR";
+  auto in = at::randint(1, 10, {23, 17, 5, 29}, {at::kCUDA});
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto jit_results = torch_tensorrt::tests::util::EvaluateGraphJIT(g, {in});
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngineDynamic(g, params, {in}, true, true);
+
+  ASSERT_TRUE(at::equal(jit_results[0].toTensor().to(at::kCUDA), trt_results[0]));
+}
+
+TEST(Evaluators, OnesLikeEvaluatesCorrectly) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+        %2 : None = prim::Constant() # :0:0
+        %z.1 : Tensor = aten::ones_like(%x.1, %2, %2, %2, %2, %2)
+        return (%z.1))IR";
+
+  auto in = at::randint(1, 10, {1, 5, 5, 5}, {at::kCUDA});
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto jit_results = torch_tensorrt::tests::util::EvaluateGraphJIT(g, {in});
+  auto trt_results = torch_tensorrt::tests::util::EvaluateGraph(g->block(), {in});
+
+  ASSERT_TRUE(at::equal(jit_results[0].toTensor().to(at::kCUDA), trt_results[0].toTensor()));
+}
+
+TEST(Evaluators, OnesLikeDataTypeEvaluatesCorrectly) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+        %2 : int = prim::Constant[value=5]() # :0:0 (Float16)
+        %3 : None = prim::Constant()
+        %z.1 : Tensor = aten::ones_like(%x.1, %2, %3, %3, %3, %3)
+        return (%z.1))IR";
+
+  auto in = at::randint(1, 10, {1, 5, 5, 5}, {at::kCUDA});
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto jit_results = torch_tensorrt::tests::util::EvaluateGraphJIT(g, {in});
+  auto trt_results = torch_tensorrt::tests::util::EvaluateGraph(g->block(), {in});
+
+  ASSERT_TRUE(at::equal(jit_results[0].toTensor().to(at::kCUDA), trt_results[0].toTensor()));
+}
+
+TEST(Evaluators, OnesLikeDynamic) {
+  const auto graph = R"IR(
+      graph(%x.1 : Tensor):
+        %2 : int = prim::Constant[value=5]() # :0:0 (Float16)
+        %3 : None = prim::Constant()
+        %z.1 : Tensor = aten::ones_like(%x.1, %2, %3, %3, %3, %3)
+        return (%z.1))IR";
+  auto in = at::randint(1, 10, {3, 6}, {at::kCUDA});
+
+  auto g = std::make_shared<torch::jit::Graph>();
+  torch::jit::parseIR(graph, g.get());
+
+  auto jit_results = torch_tensorrt::tests::util::EvaluateGraphJIT(g, {in});
+  auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
+  auto trt_results = torch_tensorrt::tests::util::RunGraphEngineDynamic(g, params, {in}, true, true);
+
+  ASSERT_TRUE(at::equal(jit_results[0].toTensor().to(at::kCUDA), trt_results[0]));
+}
+
 TEST(Evaluators, ATenArangeIntEvaluatesCorrectly) {
   const auto graph = R"IR(
       graph():
diff --git a/tests/core/partitioning/test_loop_fallback.cpp b/tests/core/partitioning/test_loop_fallback.cpp
index 1da56f1a8d..4d1729fd28 100644
--- a/tests/core/partitioning/test_loop_fallback.cpp
+++ b/tests/core/partitioning/test_loop_fallback.cpp
@@ -53,6 +53,7 @@ TEST(Partitioning, CheckLoopFallbackNoEvalCompilesCorrectly) {
 
   std::vector<torch_tensorrt::core::ir::Input> input_ranges{torch_tensorrt::core::ir::Input({1, 10})};
   torch_tensorrt::core::CompileSpec cfg(input_ranges);
+  cfg.partitioning_info.forced_fallback_operators.push_back("aten::ones_like");
   cfg.partitioning_info.enabled = true;
 
   auto jit_results = mod.forward(jit_inputs_ivalues).toTensor();
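
Note: the diff registers an evaluator for aten::fill_ but the test additions above only cover aten::new_zeros, aten::new_ones, aten::zeros_like, and aten::ones_like. A minimal sketch of a matching test, following the pattern of the tests above, might look like the following. This sketch is hypothetical and not part of the diff; the test name and the choice of an integer fill value (so the JIT-side fill_ of the integer input and the TRT-side torch::full agree without dtype conversion) are assumptions.

// Hypothetical test for the aten::fill_ evaluator, mirroring the
// NewZeros/NewOnes tests above; not part of this diff.
TEST(Evaluators, FillScalarEvaluatesCorrectly) {
  // aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> (Tensor(a!))
  const auto graph = R"IR(
      graph(%x.1 : Tensor):
        %2 : int = prim::Constant[value=4]()
        %z.1 : Tensor = aten::fill_(%x.1, %2)
        return (%z.1))IR";

  auto in = at::randint(1, 10, {1, 5, 5, 5}, {at::kCUDA});

  auto g = std::make_shared<torch::jit::Graph>();
  torch::jit::parseIR(graph, g.get());

  // Evaluate the graph both through TorchScript and through the
  // Torch-TensorRT evaluator path, then compare the results.
  auto jit_results = torch_tensorrt::tests::util::EvaluateGraphJIT(g, {in});
  auto trt_results = torch_tensorrt::tests::util::EvaluateGraph(g->block(), {in});

  ASSERT_TRUE(at::equal(jit_results[0].toTensor().to(at::kCUDA), trt_results[0].toTensor()));
}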