Skip to content

Commit 60dba12

Browse files
committed
feat: update truncate long/double error and warning messages
Signed-off-by: inocsin <vcheungyi@163.com>
1 parent 69e49e8 commit 60dba12

File tree

1 file changed

+3
-3
lines changed

1 file changed

+3
-3
lines changed

core/conversion/var/Var.cpp

+3-3
Original file line numberDiff line numberDiff line change
@@ -98,13 +98,13 @@ nvinfer1::ITensor* Var::ITensorOrFreeze(ConversionCtx* ctx) {
9898
if (isIValue()) {
9999
auto tensor = ptr_.ivalue->toTensor();
100100
if ((tensor.scalar_type() == at::kLong || tensor.scalar_type() == at::kDouble) && !ctx->settings.truncate_long_and_double) {
101-
TRTORCH_CHECK(0, "Unable to freeze tensor of type kLong/kDouble into constant layer, try to compile model with truncate_long_and_double ON");
101+
TRTORCH_THROW_ERROR("Unable to freeze tensor of type Int64/Float64 into constant layer, try to compile model with truncate_long_and_double ON");
102102
} else if (tensor.scalar_type() == at::kLong && ctx->settings.truncate_long_and_double) {
103103
weights = converters::Weights(ctx, tensor.toType(at::kInt));
104-
LOG_WARNING("Warning: Truncating weight (constant in the graph) from kLong to kInt to indicate that only constants are affected.");
104+
LOG_WARNING("Warning: Truncating weight (constant in the graph) from Int64 to Int32.");
105105
} else if (tensor.scalar_type() == at::kDouble && ctx->settings.truncate_long_and_double) {
106106
weights = converters::Weights(ctx, tensor.toType(at::kFloat));
107-
LOG_WARNING("Warning: Truncating weight (constant in the graph) from kDouble to kFloat to indicate that only constants are affected.");
107+
LOG_WARNING("Warning: Truncating weight (constant in the graph) from Float64 to Float32.");
108108
} else {
109109
weights = converters::Weights(ctx, tensor);
110110
}

0 commit comments

Comments
 (0)