feat: support setting input types of subgraph in fallback, handle Tensor type in evaluated_value_map branch in MarkOutputs

Signed-off-by: vcheungyi@163.com <vcheungyi@163.com>
inocsin committed Oct 19, 2021
1 parent 4d95b04 commit 4778b2b
Showing 4 changed files with 30 additions and 1 deletion.
1 change: 1 addition & 0 deletions core/compiler.cpp
@@ -261,6 +261,7 @@ GraphAndMapping ConstructFallbackGraph(
if (seg_block.target() == partitioning::SegmentedBlock::kTensorRT) {
std::vector<ir::Input> inputs;
for (auto& shape : seg_block.in_shape()) {
// set the input shape with data type, using copy constructor
inputs.push_back(ir::Input(shape));
}
// update the input ranges for each segment
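For context, the per-segment input shapes are now ir::Input objects that already carry a data type (see the shape_analysis.cpp change below), so the copy construction above is enough to propagate that type into the TensorRT segment. A dependency-free sketch of the pattern; the Input struct here is a simplified, hypothetical stand-in for ir::Input, not the real class:

#include <cstdint>
#include <iostream>
#include <utility>
#include <vector>

enum class DataType { kFLOAT, kHALF, kINT32 };

// Simplified stand-in for trtorch::core::ir::Input: just a shape plus a dtype.
struct Input {
  std::vector<int64_t> shape;
  DataType dtype;
  Input(std::vector<int64_t> s, DataType t = DataType::kFLOAT) : shape(std::move(s)), dtype(t) {}
  // The implicitly generated copy constructor copies shape and dtype together.
};

int main() {
  std::vector<Input> seg_in_shapes = {Input({1, 3, 224, 224}, DataType::kHALF)};
  std::vector<Input> inputs;
  for (auto& shape : seg_in_shapes) {
    inputs.push_back(Input(shape)); // the dtype travels with the copy
  }
  std::cout << (inputs[0].dtype == DataType::kHALF) << "\n"; // prints 1
}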
18 changes: 18 additions & 0 deletions core/conversion/conversion.cpp
@@ -9,6 +9,8 @@

#include "c10/util/intrusive_ptr.h"
#include "core/conversion/tensorcontainer/TensorContainer.h"
#include "core/util/trt_util.h"
#include "core/conversion/converters/converter_util.h"

namespace trtorch {
namespace core {
@@ -210,6 +212,21 @@ void MarkOutputs(ConversionCtx* ctx, at::ArrayRef<const torch::jit::Value*> outp
LOG_INFO(
ctx->logger, "Marking Output " << out->debugName() << " named " << name << " in engine (ctx.MarkOutput)");
ctx->num_outputs += 1;
} else if(out_ivalue.isTuple()) {
TRTORCH_THROW_ERROR("Tuple type. Only a single tensor or a TensorList type is supported.");
} else if(out_ivalue.isList()) {
TRTORCH_THROW_ERROR("List type. Only a single tensor or a TensorList type is supported.");
} else if(out_ivalue.isScalar()) {
TRTORCH_THROW_ERROR("Scalar type. Only a single tensor or a TensorList type is supported.");
} else if(out_ivalue.isTensor()) {
// prim::NumToTensor will go to here
std::string name = std::string("output_") + std::to_string(ctx->num_outputs);
auto out_tensor = trtorch::core::conversion::converters::tensor_to_const(ctx, out_ivalue.toTensor(), "");
out_tensor->setName(name.c_str());
ctx->net->markOutput(*out_tensor);
LOG_INFO(
ctx->logger, "Marking Output " << out->debugName() << " named " << name << " in engine (ctx.MarkOutput)");
ctx->num_outputs += 1;
} else {
TRTORCH_THROW_ERROR("Unknown output type. Only a single tensor or a TensorList type is supported.");
}
@@ -361,6 +378,7 @@ void ConvertBlockToNetDef(
ConversionInfo build_info,
GraphParams& static_params) {
LOG_INFO(ctx->logger, "Converting Block");
LOG_DEBUG(ctx->logger, *b->owningGraph());

auto inputs = b->inputs();
AddParamsToCtxValueMap(ctx, static_params);
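A rough sketch of what the new isTensor() branch in MarkOutputs does, written against the raw TensorRT API rather than TRTorch's tensor_to_const helper: freeze the evaluated tensor's values into the network as a constant layer, name its output, and mark it as an engine output. The function is illustrative only and assumes data/count describe a flat float buffer:

#include <string>
#include <NvInfer.h>

// Hedged sketch, not the TRTorch implementation: tensor_to_const also handles
// multi-dimensional shapes and non-float data types.
nvinfer1::ITensor* mark_const_output(
    nvinfer1::INetworkDefinition* net, const float* data, int64_t count, int num_outputs) {
  // Freeze the evaluated values into the network as a constant layer.
  nvinfer1::Weights w{nvinfer1::DataType::kFLOAT, data, count};
  nvinfer1::Dims dims;
  dims.nbDims = 1;
  dims.d[0] = static_cast<int32_t>(count);
  auto* layer = net->addConstant(dims, w);

  // Name the constant's output and register it as an engine output,
  // mirroring the setName/markOutput calls in MarkOutputs above.
  std::string name = "output_" + std::to_string(num_outputs);
  auto* out = layer->getOutput(0);
  out->setName(name.c_str());
  net->markOutput(*out);
  return out;
}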
11 changes: 10 additions & 1 deletion core/partitioning/shape_analysis.cpp
@@ -98,7 +98,16 @@ void getSegmentsOutputByRunning(
std::vector<ir::Input> input_shape;
for (auto& i : seg_block.raw_inputs()) {
if (ivalues_maps[i].isTensor()) {
input_shape.push_back(util::toVec(util::toDims(ivalues_maps[i].toTensor().sizes())));
// set the input_shape and data_type
c10::optional<nvinfer1::DataType> dtype = util::optTypeMetaToTRTDataType(ivalues_maps[i].toTensor().dtype());
nvinfer1::DataType nv_dtype;
if (dtype == c10::nullopt) {
nv_dtype = nvinfer1::DataType::kFLOAT;
} else {
nv_dtype = dtype.value();
}
input_shape.push_back(ir::Input(util::toVec(util::toDims(ivalues_maps[i].toTensor().sizes())),
nv_dtype));
}
}

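The dtype handling added above follows a simple defaulting pattern: translate the ATen dtype to a TensorRT dtype if a mapping exists, otherwise fall back to kFLOAT. A minimal, dependency-free sketch of that pattern (the enums and to_trt_type are stand-ins for at::ScalarType, nvinfer1::DataType, and util::optTypeMetaToTRTDataType, not the real APIs):

#include <iostream>
#include <optional>

enum class AtenType { Float, Half, Long, ComplexDouble };
enum class TrtType { kFLOAT, kHALF, kINT32 };

std::optional<TrtType> to_trt_type(AtenType t) {
  switch (t) {
    case AtenType::Float: return TrtType::kFLOAT;
    case AtenType::Half:  return TrtType::kHALF;
    case AtenType::Long:  return TrtType::kINT32; // narrowing mapping, as in trt_util.cpp
    default:              return std::nullopt;    // no TensorRT equivalent
  }
}

int main() {
  auto dtype = to_trt_type(AtenType::ComplexDouble);
  // Same defaulting as shape_analysis.cpp: an unmapped dtype becomes kFLOAT.
  TrtType nv_dtype = dtype.value_or(TrtType::kFLOAT);
  std::cout << (nv_dtype == TrtType::kFLOAT) << "\n"; // prints 1
}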
1 change: 1 addition & 0 deletions core/util/trt_util.cpp
@@ -240,6 +240,7 @@ const std::unordered_map<at::ScalarType, nvinfer1::DataType>& get_at_trt_type_ma
{at::kInt, nvinfer1::DataType::kINT32},
{at::kChar, nvinfer1::DataType::kINT8},
{at::kBool, nvinfer1::DataType::kBOOL},
{at::kLong, nvinfer1::DataType::kINT32},
};
return at_trt_type_map;
}
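The new at::kLong entry maps 64-bit integer tensors to TensorRT's 32-bit kINT32; the TensorRT releases targeted at the time expose no INT64 tensor type, so the mapping is narrowing and lossy for values outside the int32 range. An illustrative ATen snippet (not code from this commit) of the kind of cast a caller would perform before backing nvinfer1::Weights with such a tensor's storage:

#include <ATen/ATen.h>
#include <iostream>

int main() {
  at::Tensor idx = at::arange(3, at::kLong);
  // Narrow to int32 so the element size matches nvinfer1::DataType::kINT32.
  at::Tensor idx32 = idx.to(at::kInt).contiguous();
  std::cout << (idx32.scalar_type() == at::kInt) << "\n"; // prints 1
}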
