
Commit bd0e2c5

Ensure sample encapsulation in TensorVector
Add APIs matching TensorList to TensorVector:
* sample pointer accessors
* Set/GetMeta

Change operator[] to return TensorView<DynamicType>. Adjust code where necessary.

Signed-off-by: Krzysztof Lecki <klecki@nvidia.com>
1 parent e1c002f commit bd0e2c5

34 files changed, +156 -128 lines changed
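To illustrate the API shift described in the commit message, below is a minimal sketch of the before/after access pattern. This is a hypothetical snippet, not part of the commit; only the accessors visible in the diffs below (set_type, Resize, SetLayout, mutable_tensor<T>(i), tensor_shape(i), GetMeta(i)) are taken from the change, everything else (function name, header path, shapes) is assumed.

// Minimal sketch of the access-pattern change introduced by this commit (hypothetical example).
// Before: samples were reached through operator[] returning a Tensor<CPUBackend>&.
// After:  the batch object exposes per-sample accessors directly, keeping samples encapsulated.
#include "dali/pipeline/data/tensor_vector.h"  // assumed header location

using namespace dali;  // for brevity in this sketch

void FillBatch(TensorVector<CPUBackend> &batch, int batch_size) {
  // Batch-level allocation instead of per-sample set_type/Resize:
  batch.set_type<float>();
  batch.Resize(uniform_list_shape(batch_size, TensorShape<>{16, 16}));
  batch.SetLayout("HW");

  for (int i = 0; i < batch_size; i++) {
    // Old (pre-commit):  float *ptr = batch[i].mutable_data<float>();
    // New (this commit): sample pointer accessor on the batch object:
    float *ptr = batch.mutable_tensor<float>(i);
    auto n = volume(batch.tensor_shape(i));  // shape accessor replaces batch[i].shape()
    for (int64_t j = 0; j < n; j++)
      ptr[j] = static_cast<float>(j);

    // Per-sample metadata now goes through Get/SetMeta, e.g. meta.GetSourceInfo():
    auto meta = batch.GetMeta(i);
    (void)meta;
  }
}

The same idioms recur throughout the file diffs below, replacing per-sample Tensor access with batch-level accessors.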

dali/benchmark/displacement_cpu_bench.cc

+2 -2

@@ -1,4 +1,4 @@
-// Copyright (c) 2017-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// Copyright (c) 2017-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -88,7 +88,7 @@ void DisplacementBench(benchmark::State& st) {//NOLINT
   // tensor out is resized by operator itself in DisplacementFilter::DataDependentSetup()

   // TODO(klecki) Accomodate to use different inputs from test data
-  auto *ptr = (*tensor_in)[0].template mutable_data<T>();
+  auto *ptr = (*tensor_in).template mutable_tensor<T>(0);
   for (int i = 0; i < N; i++) {
     ptr[i] = i;
   }

dali/benchmark/operator_bench.h

+6 -9

@@ -1,4 +1,4 @@
-// Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -54,16 +54,13 @@ class OperatorBench : public DALIBenchmark {
     auto op_ptr = InstantiateOperator(op_spec);

     auto data_in = std::make_shared<TensorVector<CPUBackend>>(batch_size);
-    for (auto &in_ptr : *data_in) {
-      in_ptr = std::make_shared<Tensor<CPUBackend>>();
-      in_ptr->set_type<T>();
-      in_ptr->Resize({H, W, C});
-      in_ptr->SetLayout("HWC");
-    }
+    data_in->set_type<T>();
+    data_in->Resize(uniform_list_shape(batch_size, TensorShape<>{H, W, C}));
+    data_in->SetLayout("HWC");

     if (fill_in_data) {
-      for (auto &in_ptr : *data_in) {
-        auto *ptr = in_ptr->template mutable_data<T>();
+      for (int sample_id = 0; sample_id < batch_size; sample_id++) {
+        auto *ptr = data_in->template mutable_tensor<T>(sample_id);
         for (int i = 0; i < N; i++) {
           ptr[i] = static_cast<T>(i);
         }

dali/operators/audio/nonsilence_op.h

+3 -3

@@ -1,4 +1,4 @@
-// Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -228,8 +228,8 @@ class NonsilenceOperatorCpu : public NonsilenceOperator<CPUBackend> {
         args.reset_interval = reset_interval_;

         auto res = DetectNonsilenceRegion(intermediate_buffers_[thread_id], args);
-        auto beg_ptr = output_begin[sample_id].mutable_data<int>();
-        auto len_ptr = output_length[sample_id].mutable_data<int>();
+        auto beg_ptr = output_begin.mutable_tensor<int>(sample_id);
+        auto len_ptr = output_length.mutable_tensor<int>(sample_id);
         *beg_ptr = res.first;
         *len_ptr = res.second;
       }, in_shape.tensor_size(sample_id));

dali/operators/audio/preemphasis_filter_op.cc

+5 -5

@@ -1,4 +1,4 @@
-// Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -65,11 +65,11 @@ void PreemphasisFilterCPU::RunImplTyped(workspace_t<CPUBackend> &ws) {
   for (int sample_id = 0; sample_id < nsamples; sample_id++) {
     tp.AddWork(
         [this, &output, &input, sample_id](int thread_id) {
-          const auto in_ptr = input[sample_id].data<InputType>();
-          auto out_ptr = output[sample_id].mutable_data<OutputType>();
-          DALI_ENFORCE(input[sample_id].shape() == output[sample_id].shape(),
+          const auto in_ptr = input.tensor<InputType>(sample_id);
+          auto out_ptr = output.mutable_tensor<OutputType>(sample_id);
+          DALI_ENFORCE(input.tensor_shape(sample_id) == output.tensor_shape(sample_id),
                        "Input and output shapes don't match");
-          auto n = volume(output[sample_id].shape());
+          auto n = volume(output.tensor_shape(sample_id));
           auto coeff = preemph_coeff_[sample_id];
           if (coeff == 0.0f) {
             for (int64_t j = 0; j < n; j++) {

dali/operators/decoder/audio/audio_decoder_op.cc

+4 -4

@@ -1,4 +1,4 @@
-// Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -88,13 +88,13 @@ AudioDecoderCpu::SetupImpl(std::vector<OutputDesc> &output_desc, const workspace

   for (int i = 0; i < batch_size; i++) {
     auto &meta = sample_meta_[i] =
-        decoders_[i]->Open({static_cast<const char *>(input[i].raw_data()),
-                            input[i].shape().num_elements()});
+        decoders_[i]->Open({static_cast<const char *>(input.raw_tensor(i)),
+                            input.tensor_shape(i).num_elements()});
     TensorShape<> data_sample_shape = DecodedAudioShape(
         meta, use_resampling_ ? target_sample_rates_[i] : -1.0f, downmix_);
     shape_data.set_tensor_shape(i, data_sample_shape);
     shape_rate.set_tensor_shape(i, {});
-    files_names_[i] = input[i].GetSourceInfo();
+    files_names_[i] = input.GetMeta(i).GetSourceInfo();
   }

   output_desc[0] = { shape_data, output_type_ };

dali/operators/generic/cast.cc

+2 -2

@@ -51,8 +51,8 @@ void CastCPU::RunImpl(HostWorkspace &ws) {
   TYPE_SWITCH(itype, type2id, IType, CAST_ALLOWED_TYPES, (

     for (int sample_id = 0; sample_id < num_samples; sample_id++) {
-      auto *out = output[sample_id].mutable_data<OType>();
-      const auto *in = input[sample_id].data<IType>();
+      auto *out = output.mutable_tensor<OType>(sample_id);
+      const auto *in = input.tensor<IType>(sample_id);
       auto size = input_shape.tensor_size(sample_id);
       tp.AddWork([out, in, size](int thread_id) { CpuHelper<OType, IType>(out, in, size); },
                  size);

dali/operators/generic/erase/erase_utils.h

+9 -9

@@ -1,4 +1,4 @@
-// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
+// Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -95,17 +95,17 @@ std::vector<kernels::EraseArgs<T, Dims>> GetEraseArgs(const OpSpec &spec,

   for (int i = 0; i < nsamples; i++) {
     if (has_tensor_roi_anchor) {
-      const auto& anchor = ws.ArgumentInput("anchor")[i];
-      assert(anchor.size() > 0);
-      roi_anchor.resize(anchor.size());
-      std::memcpy(roi_anchor.data(), anchor.data<float>(), sizeof(float) * roi_anchor.size());
+      const auto& anchor = view<const float>(ws.ArgumentInput("anchor")[i]);
+      assert(anchor.shape.num_elements() > 0);
+      roi_anchor.resize(anchor.shape.num_elements());
+      std::memcpy(roi_anchor.data(), anchor.data, sizeof(float) * roi_anchor.size());
     }

     if (has_tensor_roi_shape) {
-      const auto& shape = ws.ArgumentInput("shape")[i];
-      assert(shape.size() > 0);
-      roi_shape.resize(shape.size());
-      std::memcpy(roi_shape.data(), shape.data<float>(), sizeof(float) * roi_shape.size());
+      const auto& shape = view<const float>(ws.ArgumentInput("shape")[i]);
+      assert(shape.shape.num_elements() > 0);
+      roi_shape.resize(shape.num_elements());
+      std::memcpy(roi_shape.data(), shape.data, sizeof(float) * roi_shape.size());
     }

     DALI_ENFORCE(roi_anchor.size() == roi_shape.size());

dali/operators/generic/lookup_table.cc

+3 -3

@@ -1,4 +1,4 @@
-// Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -25,8 +25,8 @@ void LookupValuesImpl(ThreadPool &tp, TensorVector<CPUBackend> &output,
                       const Output *lookup_table, const Output default_value) {
   for (int sample_idx = 0; sample_idx < shape.num_samples(); sample_idx++) {
     auto data_size = shape.tensor_size(sample_idx);
-    auto *out_data = output[sample_idx].mutable_data<Output>();
-    const auto *in_data = input[sample_idx].data<Input>();
+    auto *out_data = output.mutable_tensor<Output>(sample_idx);
+    const auto *in_data = input.tensor<Input>(sample_idx);
     tp.AddWork(
         [=](int thread_id) {
           for (int64_t i = 0; i < data_size; i++) {

dali/operators/generic/reshape.cc

+3 -3

@@ -1,4 +1,4 @@
-// Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -393,8 +393,8 @@ void Reshape<CPUBackend>::RunImpl(HostWorkspace &ws) {
   out.Resize(output_shape_, output_type_->id());
   int N = output_shape_.num_samples();
   for (int i = 0; i < N; i++) {
-    assert(out[i].raw_data() == in[i].raw_data());
-    assert(out[i].shape() == output_shape_[i]);
+    assert(out[i].data == in[i].data);
+    assert(out[i].shape == output_shape_[i]);
   }
   out.SetLayout(layout);
 }
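The assertions above rely on the other half of the commit message: operator[] no longer returns a Tensor<CPUBackend>&, but a lightweight view whose .data and .shape members describe the sample. A hedged illustration follows; the return type name comes from the commit message, the member names from the diff above, and the helper function itself is hypothetical.

// Hypothetical sketch of the new operator[] semantics (TensorView<DynamicType>-like result).
#include <cassert>
#include "dali/pipeline/data/tensor_vector.h"  // assumed header location

using namespace dali;

void CheckSharesData(TensorVector<CPUBackend> &out, TensorVector<CPUBackend> &in, int i) {
  auto out_view = out[i];  // a non-owning view of sample i, not a Tensor<CPUBackend>&
  auto in_view = in[i];
  assert(out_view.data == in_view.data);     // same underlying allocation after a no-copy reshape
  assert(out_view.shape == out.shape()[i]);  // the view carries the per-sample shape
}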

dali/operators/generic/shapes.h

+2 -2

@@ -1,4 +1,4 @@
-// Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -77,7 +77,7 @@ class Shapes : public Operator<Backend> {
     int n = out.num_samples();
     assert(n == shape.num_samples());
     for (int i = 0; i < n; i++) {
-      type *data = out[i].mutable_data<type>();
+      type *data = out.mutable_tensor<type>(i);
       auto sample_shape = shape.tensor_shape_span(i);
       for (int j = 0; j < shape.sample_dim(); j++)
         data[j] = sample_shape[j];

dali/operators/generic/slice/slice_base.cc

+1 -2

@@ -1,4 +1,4 @@
-// Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -58,7 +58,6 @@ bool SliceBaseCpu<OutputType, InputType, Dims>::SetupImpl(std::vector<OutputDesc
   output_desc[0].shape.resize(nsamples, Dims);

   kernels::KernelContext ctx;
-  auto in_view = view<const InputType, Dims>(input);
   for (int i = 0; i < nsamples; i++) {
     auto in_view = view<const InputType, Dims>(input[i]);
     auto req = Kernel().Setup(ctx, in_view, args_[i]);

dali/operators/generic/transpose/transpose.cc

+3 -3

@@ -1,4 +1,4 @@
-// Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -46,8 +46,8 @@ class TransposeCPU : public Transpose<CPUBackend> {
         TensorShape<> src_ts = input.shape()[i];
         auto dst_ts = permute(src_ts, perm_);
         kernels::TransposeGrouped(
-            TensorView<StorageCPU, T>{output[i].mutable_data<T>(), dst_ts},
-            TensorView<StorageCPU, const T>{input[i].data<T>(), src_ts}, make_cspan(perm_));
+            TensorView<StorageCPU, T>{output.mutable_tensor<T>(i), dst_ts},
+            TensorView<StorageCPU, const T>{input.tensor<T>(i), src_ts}, make_cspan(perm_));
       }, out_shape.tensor_size(i));
     }
   ), DALI_FAIL(make_string("Unsupported input type: ", input_type)));  // NOLINT

dali/operators/geometry/coord_flip.cc

+4 -4

@@ -1,4 +1,4 @@
-// Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -77,11 +77,11 @@ void CoordFlipCPU::RunImpl(workspace_t<CPUBackend> &ws) {
     mirrored_origin[y_dim_] = 2.0f * spec_.GetArgument<float>("center_y", &ws, sample_id);
     mirrored_origin[z_dim_] = 2.0f * spec_.GetArgument<float>("center_z", &ws, sample_id);

-    auto in_size = volume(input[sample_id].shape());
+    auto in_size = volume(input.tensor_shape(sample_id));
     thread_pool.AddWork(
         [this, &input, in_size, &output, sample_id, flip_dim, mirrored_origin](int thread_id) {
-          const auto *in = input[sample_id].data<float>();
-          auto *out = output[sample_id].mutable_data<float>();
+          const auto *in = input.tensor<float>(sample_id);
+          auto *out = output.mutable_tensor<float>(sample_id);
           int d = 0;
           int64_t i = 0;
           for (; i < in_size; i++, d++) {

dali/operators/geometry/mt_transform_attr_test.cc

+7 -7

@@ -1,4 +1,4 @@
-// Copyright (c) 2020-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// Copyright (c) 2020-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -155,11 +155,11 @@ TEST(MTTransformAttr, MInputTInput) {
   Minp->Resize(Mtls, DALI_FLOAT);
   Tinp->Resize(Ttls, DALI_FLOAT);
   for (int i = 0; i < N; i++) {
-    float *data = (*Minp)[i].mutable_data<float>();
+    float *data = Minp->mutable_tensor<float>(i);
     for (int j = 0; j < volume(Mtls[i]); j++)
       data[j] = 1 + j + i * 10;

-    data = (*Tinp)[i].mutable_data<float>();
+    data = Tinp->mutable_tensor<float>(i);
     for (int j = 0; j < volume(Ttls[i]); j++)
       data[j] = 10 + j * 10 + i * 100;
   }
@@ -198,9 +198,9 @@ TEST(MTTransformAttr, MScalarInputTScalarInput) {
   Minp->Resize(tls, DALI_FLOAT);
   Tinp->Resize(tls, DALI_FLOAT);
   for (int i = 0; i < N; i++) {
-    float *data = (*Minp)[i].mutable_data<float>();
+    float *data = Minp->mutable_tensor<float>(i);
     data[0] = i + 10;
-    data = (*Tinp)[i].mutable_data<float>();
+    data = Tinp->mutable_tensor<float>(i);
     data[0] = i + 100;
   }

@@ -233,7 +233,7 @@ TEST(MTTransformAttr, MTInput) {
   int N = tls.num_samples();;
   MTinp->Resize(tls, DALI_FLOAT);
   for (int i = 0; i < N; i++) {
-    auto *data = (*MTinp)[i].mutable_data<float>();
+    auto *data = MTinp->mutable_tensor<float>(i);
     for (int j = 0; j < volume(tls[i]); j++)
       data[j] = 1 + j + i * 10;
   }
@@ -342,7 +342,7 @@ TEST(MTTransformAttr, MTInput_ErrorSize) {
   int N = tls.num_samples();;
   MTinp->Resize(tls, DALI_FLOAT);
   for (int i = 0; i < N; i++) {
-    auto *data = (*MTinp)[i].mutable_data<float>();
+    auto *data = MTinp->mutable_tensor<float>(i);
     for (int j = 0; j < volume(tls[i]); j++)
       data[j] = 1 + j + i * 10;
   }

dali/operators/image/convolution/gaussian_blur.cc

+6 -6

@@ -108,10 +108,10 @@ class GaussianBlurOpCpu : public OpImplBase<CPUBackend> {
       params_[i] = ObtainSampleParams<axes>(i, spec_, ws);
       windows_[i].PrepareWindows(params_[i]);
       // We take only last `ndim` siginificant dimensions to handle sequences as well
-      auto elem_shape = input[i].shape().template last<ndim>();
+      auto elem_shape = input.tensor_shape(i).template last<ndim>();
       auto& req = kmgr_.Setup<Kernel>(i, ctx_, elem_shape, params_[i].window_sizes);
       // The shape of data stays untouched
-      output_desc[0].shape.set_tensor_shape(i, input[i].shape());
+      output_desc[0].shape.set_tensor_shape(i, input.tensor_shape(i));
     }
     return true;
   }
@@ -125,7 +125,7 @@ class GaussianBlurOpCpu : public OpImplBase<CPUBackend> {

     int nsamples = input.shape().num_samples();
     for (int sample_idx = 0; sample_idx < nsamples; sample_idx++) {
-      const auto& shape = input[sample_idx].shape();
+      const auto& shape = input.tensor_shape(sample_idx);
       auto elem_volume = volume(shape.begin() + dim_desc_.usable_axes_start, shape.end());

       int seq_elements = 1;
@@ -138,11 +138,11 @@ class GaussianBlurOpCpu : public OpImplBase<CPUBackend> {
       thread_pool.AddWork(
           [this, &input, &output, sample_idx, elem_idx, stride](int thread_id) {
            auto gaussian_windows = windows_[sample_idx].GetWindows();
-            auto elem_shape = input[sample_idx].shape().template last<ndim>();
+            auto elem_shape = input.tensor_shape(sample_idx).template last<ndim>();
            auto in_view = TensorView<StorageCPU, const In, ndim>{
-                input[sample_idx].template data<In>() + stride * elem_idx, elem_shape};
+                input.template tensor<In>(sample_idx) + stride * elem_idx, elem_shape};
            auto out_view = TensorView<StorageCPU, Out, ndim>{
-                output[sample_idx].template mutable_data<Out>() + stride * elem_idx, elem_shape};
+                output.template mutable_tensor<Out>(sample_idx) + stride * elem_idx, elem_shape};
            // I need a context for that particular run (or rather matching the thread &
            // scratchpad)
            auto ctx = ctx_;

dali/operators/image/distortion/jpeg_compression_distortion_op_gpu.cu

+1 -1

@@ -71,7 +71,7 @@ void JpegCompressionDistortionGPU::RunImpl(workspace_t<GPUBackend> &ws) {
     // Set quality argument for an image from samples
     if (is_sequence) {
       for (int i = 0; i < nsamples; i++) {
-        auto nframes = input.tensor_shape_span(i)[0];
+        auto nframes = input.shape().tensor_shape_span(i)[0];
         for (int j = 0; j < nframes; ++j) {
           quality_.push_back(quality_arg_[i].data[0]);
         }

dali/operators/image/remap/displacement_filter_impl_gpu.cuh

+3 -2

@@ -1,4 +1,4 @@
-// Copyright (c) 2017-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// Copyright (c) 2017-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -325,7 +325,8 @@ class DisplacementFilter<GPUBackend, Displacement,
       sample.input = input.template tensor<T>(sample_idx);
       sample.raw_params = GetDisplacementParams(sample_idx);
       sample.shape = shape.tensor_shape<nDims>(sample_idx);
-      sample.mask = has_mask_ ? ws.ArgumentInput("mask")[sample_idx].data<int>()[0] : true;
+      sample.mask =
+          has_mask_ ? static_cast<const int *>(ws.ArgumentInput("mask")[sample_idx].data)[0] : true;
     }

     samples_dev_.from_host(samples_, stream);

dali/operators/image/remap/warp_affine_params.h

+3 -3

@@ -1,4 +1,4 @@
-// Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -139,9 +139,9 @@ class WarpAffineParamProvider
     auto *params = this->template AllocParams<mm::memory_kind::host>();
     for (int i = 0; i < num_samples_; i++) {
       if (invert) {
-        params[i] = static_cast<const MappingParams *>(input[i].raw_data())->inv();
+        params[i] = static_cast<const MappingParams *>(input.raw_data(i))->inv();
       } else {
-        params[i] = *static_cast<const MappingParams *>(input[i].raw_data());
+        params[i] = *static_cast<const MappingParams *>(input.raw_data(i));
       }
     }
   }

dali/operators/image/remap/warp_param_provider.h

+2 -2

@@ -1,4 +1,4 @@
-// Copyright (c) 2019-2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -47,7 +47,7 @@ class InterpTypeProvider {
           "interp_type must be a single value or contain one value per sample");
       interp_types_.resize(n);
       for (int i = 0; i < n; i++)
-        interp_types_[i] = tensor_vector[i].data<DALIInterpType>()[0];
+        interp_types_[i] = tensor_vector.tensor<DALIInterpType>(i)[0];
     } else {
      interp_types_.resize(1, spec.template GetArgument<DALIInterpType>("interp_type"));
    }
