diff --git a/src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/netcore/Tensor.cs b/src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/netcore/Tensor.cs
index c64250c0151164..c5049cb8b93a19 100644
--- a/src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/netcore/Tensor.cs
+++ b/src/libraries/System.Numerics.Tensors/src/System/Numerics/Tensors/netcore/Tensor.cs
@@ -347,14 +347,29 @@ public static Tensor<T> CreateFromShapeUninitialized<T>(scoped ReadOnlySpan
         public static ref readonly TensorSpan<T> FillGaussianNormalDistribution<T>(in TensorSpan<T> destination, Random? random = null)
             where T : IFloatingPoint<T>
         {
-            Span<T> span = MemoryMarshal.CreateSpan(ref destination._reference, (int)destination._shape.LinearLength);
             random ??= Random.Shared;

-            for (int i = 0; i < span.Length; i++)
+            if (destination.IsDense)
             {
-                double u1 = 1.0 - random.NextDouble();
-                double u2 = 1.0 - random.NextDouble();
-                span[i] = T.CreateChecked(Math.Sqrt(-2.0 * Math.Log(u1)) * Math.Sin(2.0 * Math.PI * u2));
+                Span<T> span = MemoryMarshal.CreateSpan(ref destination._reference, (int)destination.FlattenedLength);
+
+                for (int i = 0; i < span.Length; i++)
+                {
+                    double u1 = 1.0 - random.NextDouble();
+                    double u2 = 1.0 - random.NextDouble();
+                    span[i] = T.CreateChecked(Math.Sqrt(-2.0 * Math.Log(u1)) * Math.Sin(2.0 * Math.PI * u2));
+                }
+            }
+            else
+            {
+                TensorSpan<T>.Enumerator enumerator = destination.GetEnumerator();
+
+                while (enumerator.MoveNext())
+                {
+                    double u1 = 1.0 - random.NextDouble();
+                    double u2 = 1.0 - random.NextDouble();
+                    enumerator.Current = T.CreateChecked(Math.Sqrt(-2.0 * Math.Log(u1)) * Math.Sin(2.0 * Math.PI * u2));
+                }
             }

             return ref destination;
@@ -370,10 +385,25 @@ public static ref readonly TensorSpan<T> FillGaussianNormalDistribution<T>(in Te
         ///
         public static ref readonly TensorSpan<T> FillUniformDistribution<T>(in TensorSpan<T> destination, Random? random = null)
             where T : IFloatingPoint<T>
         {
-            Span<T> span = MemoryMarshal.CreateSpan(ref destination._reference, (int)destination._shape.LinearLength);
             random ??= Random.Shared;
-            for (int i = 0; i < span.Length; i++)
-                span[i] = T.CreateChecked(random.NextDouble());
+
+            if (destination.IsDense)
+            {
+                Span<T> span = MemoryMarshal.CreateSpan(ref destination._reference, (int)destination.FlattenedLength);
+                for (int i = 0; i < span.Length; i++)
+                {
+                    span[i] = T.CreateChecked(random.NextDouble());
+                }
+            }
+            else
+            {
+                TensorSpan<T>.Enumerator enumerator = destination.GetEnumerator();
+
+                while (enumerator.MoveNext())
+                {
+                    enumerator.Current = T.CreateChecked(random.NextDouble());
+                }
+            }

             return ref destination;
         }
@@ -1549,12 +1579,33 @@ public static Tensor<T> Resize<T>(Tensor<T> tensor, ReadOnlySpan<nint> lengths)
             nint newSize = TensorPrimitives.Product(lengths);
             T[] values = tensor.IsPinned ? GC.AllocateArray<T>((int)newSize) : (new T[newSize]);
             Tensor<T> output = Create(values, lengths, []);
-            ReadOnlySpan<T> span = MemoryMarshal.CreateSpan(ref Unsafe.Add(ref tensor.AsTensorSpan()._reference, tensor._start), tensor._values.Length - tensor._start);
-            Span<T> ospan = MemoryMarshal.CreateSpan(ref output.AsTensorSpan()._reference, (int)output.FlattenedLength);
-            if (newSize >= span.Length)
-                span.CopyTo(ospan);
+
+            if (tensor.IsDense)
+            {
+                ReadOnlySpan<T> span = MemoryMarshal.CreateSpan(ref Unsafe.Add(ref tensor.AsTensorSpan()._reference, tensor._start), tensor._values.Length - tensor._start);
+                Span<T> ospan = MemoryMarshal.CreateSpan(ref output.AsTensorSpan()._reference, (int)output.FlattenedLength);
+                if (newSize >= span.Length)
+                {
+                    span.CopyTo(ospan);
+                }
+                else
+                {
+                    span.Slice(0, ospan.Length).CopyTo(ospan);
+                }
+            }
             else
-                span.Slice(0, ospan.Length).CopyTo(ospan);
+            {
+                nint copyLength = Math.Min(tensor.FlattenedLength, newSize);
+                ReadOnlyTensorSpan<T>.Enumerator enumerator = tensor.AsReadOnlyTensorSpan().GetEnumerator();
+                Span<T> ospan = MemoryMarshal.CreateSpan(ref output.AsTensorSpan()._reference, (int)output.FlattenedLength);
+
+                for (nint i = 0; i < copyLength; i++)
+                {
+                    bool moved = enumerator.MoveNext();
+                    Debug.Assert(moved);
+                    ospan[(int)i] = enumerator.Current;
+                }
+            }

             return output;
         }
@@ -1589,12 +1640,33 @@ public static void ResizeTo<T>(scoped in TensorSpan<T> tensor, in TensorSpan<T>
         /// <param name="destination">Destination with the desired new shape.</param>
         public static void ResizeTo<T>(scoped in ReadOnlyTensorSpan<T> tensor, in TensorSpan<T> destination)
         {
-            ReadOnlySpan<T> span = MemoryMarshal.CreateSpan(ref tensor._reference, (int)tensor._shape.LinearLength);
-            Span<T> ospan = MemoryMarshal.CreateSpan(ref destination._reference, (int)destination._shape.LinearLength);
-            if (ospan.Length >= span.Length)
-                span.CopyTo(ospan);
+            if (tensor.IsDense && destination.IsDense)
+            {
+                ReadOnlySpan<T> span = MemoryMarshal.CreateSpan(ref tensor._reference, (int)tensor.FlattenedLength);
+                Span<T> ospan = MemoryMarshal.CreateSpan(ref destination._reference, (int)destination.FlattenedLength);
+                if (ospan.Length >= span.Length)
+                {
+                    span.CopyTo(ospan);
+                }
+                else
+                {
+                    span.Slice(0, ospan.Length).CopyTo(ospan);
+                }
+            }
             else
-                span.Slice(0, ospan.Length).CopyTo(ospan);
+            {
+                nint copyLength = Math.Min(tensor.FlattenedLength, destination.FlattenedLength);
+                ReadOnlyTensorSpan<T>.Enumerator srcEnumerator = tensor.GetEnumerator();
+                TensorSpan<T>.Enumerator dstEnumerator = destination.GetEnumerator();
+
+                for (nint i = 0; i < copyLength; i++)
+                {
+                    bool srcMoved = srcEnumerator.MoveNext();
+                    bool dstMoved = dstEnumerator.MoveNext();
+                    Debug.Assert(srcMoved && dstMoved);
+                    dstEnumerator.Current = srcEnumerator.Current;
+                }
+            }
         }

         #endregion
@@ -1684,10 +1756,7 @@ public static ref readonly TensorSpan<T> ReverseDimension<T>(scoped in ReadOnlyT
         public static bool SequenceEqual<T>(this scoped in TensorSpan<T> tensor, scoped in ReadOnlyTensorSpan<T> other)
             where T : IEquatable<T>?
         {
-            return tensor.FlattenedLength == other.FlattenedLength
-                && tensor._shape.LinearLength == other._shape.LinearLength
-                && tensor.Lengths.SequenceEqual(other.Lengths)
-                && MemoryMarshal.CreateReadOnlySpan(in tensor.GetPinnableReference(), (int)tensor._shape.LinearLength).SequenceEqual(MemoryMarshal.CreateReadOnlySpan(in other.GetPinnableReference(), (int)other._shape.LinearLength));
+            return ((ReadOnlyTensorSpan<T>)tensor).SequenceEqual(other);
         }

         ///
@@ -1696,10 +1765,32 @@ public static bool SequenceEqual<T>(this scoped in TensorSpan<T> tensor, scoped
         ///
         public static bool SequenceEqual<T>(this scoped in ReadOnlyTensorSpan<T> tensor, scoped in ReadOnlyTensorSpan<T> other)
             where T : IEquatable<T>?
         {
-            return tensor.FlattenedLength == other.FlattenedLength
-                && tensor._shape.LinearLength == other._shape.LinearLength
-                && tensor.Lengths.SequenceEqual(other.Lengths)
-                && MemoryMarshal.CreateReadOnlySpan(in tensor.GetPinnableReference(), (int)tensor._shape.LinearLength).SequenceEqual(MemoryMarshal.CreateReadOnlySpan(in other.GetPinnableReference(), (int)other._shape.LinearLength));
+            if (tensor.FlattenedLength != other.FlattenedLength
+                || !tensor.Lengths.SequenceEqual(other.Lengths))
+            {
+                return false;
+            }
+
+            if (tensor.IsDense && other.IsDense)
+            {
+                return MemoryMarshal.CreateReadOnlySpan(in tensor.GetPinnableReference(), (int)tensor.FlattenedLength).SequenceEqual(MemoryMarshal.CreateReadOnlySpan(in other.GetPinnableReference(), (int)other.FlattenedLength));
+            }
+
+            ReadOnlyTensorSpan<T>.Enumerator enumerator1 = tensor.GetEnumerator();
+            ReadOnlyTensorSpan<T>.Enumerator enumerator2 = other.GetEnumerator();
+
+            while (enumerator1.MoveNext())
+            {
+                bool moved = enumerator2.MoveNext();
+                Debug.Assert(moved);
+
+                if (!EqualityComparer<T>.Default.Equals(enumerator1.Current, enumerator2.Current))
+                {
+                    return false;
+                }
+            }
+
+            return true;
         }

         #endregion
@@ -3528,8 +3619,21 @@ public static ref readonly TensorSpan<T> ILogB<T>(scoped in ReadOnlyTensorSpan
         public static nint IndexOfMax<T>(scoped in ReadOnlyTensorSpan<T> x)
             where T : INumber<T>
         {
-            ReadOnlySpan<T> span = MemoryMarshal.CreateSpan(ref x._reference, (int)x._shape.LinearLength);
-            return TensorPrimitives.IndexOfMax(span);
+            if (x.IsDense)
+            {
+                ReadOnlySpan<T> span = MemoryMarshal.CreateSpan(ref x._reference, (int)x.FlattenedLength);
+                return TensorPrimitives.IndexOfMax(span);
+            }
+
+            return IndexOfMaxFallback(x);
+        }
+
+        private static nint IndexOfMaxFallback<T>(scoped in ReadOnlyTensorSpan<T> x)
+            where T : INumber<T>
+        {
+            T[] flat = new T[x.FlattenedLength];
+            x.FlattenTo(flat);
+            return TensorPrimitives.IndexOfMax(flat);
         }

         #endregion
@@ -3540,8 +3644,21 @@ public static nint IndexOfMax<T>(scoped in ReadOnlyTensorSpan<T> x)
         public static nint IndexOfMaxMagnitude<T>(scoped in ReadOnlyTensorSpan<T> x)
             where T : INumber<T>
         {
-            ReadOnlySpan<T> span = MemoryMarshal.CreateSpan(ref x._reference, (int)x._shape.LinearLength);
-            return TensorPrimitives.IndexOfMaxMagnitude(span);
+            if (x.IsDense)
+            {
+                ReadOnlySpan<T> span = MemoryMarshal.CreateSpan(ref x._reference, (int)x.FlattenedLength);
+                return TensorPrimitives.IndexOfMaxMagnitude(span);
+            }
+
+            return IndexOfMaxMagnitudeFallback(x);
+        }
+
+        private static nint IndexOfMaxMagnitudeFallback<T>(scoped in ReadOnlyTensorSpan<T> x)
+            where T : INumber<T>
+        {
+            T[] flat = new T[x.FlattenedLength];
+            x.FlattenTo(flat);
+            return TensorPrimitives.IndexOfMaxMagnitude(flat);
         }

         #endregion
@@ -3551,8 +3668,21 @@ public static nint IndexOfMaxMagnitude<T>(scoped in ReadOnlyTensorSpan<T> x)
         public static nint IndexOfMin<T>(scoped in ReadOnlyTensorSpan<T> x)
             where T : INumber<T>
         {
-            ReadOnlySpan<T> span = MemoryMarshal.CreateSpan(ref x._reference, (int)x._shape.LinearLength);
-            return TensorPrimitives.IndexOfMin(span);
+            if (x.IsDense)
+            {
+                ReadOnlySpan<T> span = MemoryMarshal.CreateSpan(ref x._reference, (int)x.FlattenedLength);
+                return TensorPrimitives.IndexOfMin(span);
+            }
+
+            return IndexOfMinFallback(x);
+        }
+
+        private static nint IndexOfMinFallback<T>(scoped in ReadOnlyTensorSpan<T> x)
+            where T : INumber<T>
+        {
+            T[] flat = new T[x.FlattenedLength];
+            x.FlattenTo(flat);
+            return TensorPrimitives.IndexOfMin(flat);
         }

         #endregion
@@ -3564,8 +3694,21 @@ public static nint IndexOfMin<T>(scoped in ReadOnlyTensorSpan<T> x)
         public static nint IndexOfMinMagnitude<T>(scoped in ReadOnlyTensorSpan<T> x)
             where T : INumber<T>
         {
-            ReadOnlySpan<T> span = MemoryMarshal.CreateSpan(ref x._reference, (int)x._shape.LinearLength);
-            return TensorPrimitives.IndexOfMinMagnitude(span);
+            if (x.IsDense)
+            {
+                ReadOnlySpan<T> span = MemoryMarshal.CreateSpan(ref x._reference, (int)x.FlattenedLength);
+                return TensorPrimitives.IndexOfMinMagnitude(span);
+            }
+
+            return IndexOfMinMagnitudeFallback(x);
+        }
+
+        private static nint IndexOfMinMagnitudeFallback<T>(scoped in ReadOnlyTensorSpan<T> x)
+            where T : INumber<T>
+        {
+            T[] flat = new T[x.FlattenedLength];
+            x.FlattenTo(flat);
+            return TensorPrimitives.IndexOfMinMagnitude(flat);
         }

         #endregion
diff --git a/src/libraries/System.Numerics.Tensors/tests/TensorTests.cs b/src/libraries/System.Numerics.Tensors/tests/TensorTests.cs
index db11389bd64c8c..35d85ad38c0e61 100644
--- a/src/libraries/System.Numerics.Tensors/tests/TensorTests.cs
+++ b/src/libraries/System.Numerics.Tensors/tests/TensorTests.cs
@@ -714,6 +714,392 @@ public static void TensorCosineSimilarityTests()
         //    Assert.Throws(() => Tensor.SequenceEqual(t0, t1));
         //}

+        /// <summary>
+        /// Provides test cases of (dataArray, shape, strides) for creating non-dense tensor spans.
+        /// Each case has known logical elements that differ from the raw buffer layout.
+        /// </summary>
+        public static IEnumerable<object[]> NonDenseTensorData()
+        {
+            // 2x2 from 1D array with stride gap: logical elements are [10, 20, 30, 40]
+            yield return new object[] { new int[] { 10, 20, 99, 99, 30, 40, 99, 99 }, new nint[] { 2, 2 }, new nint[] { 4, 1 }, new int[] { 10, 20, 30, 40 } };
+            // 2x3 from 1D array with stride gap: logical elements are [1, 2, 3, 4, 5, 6]
+            yield return new object[] { new int[] { 1, 2, 3, 99, 4, 5, 6, 99 }, new nint[] { 2, 3 }, new nint[] { 4, 1 }, new int[] { 1, 2, 3, 4, 5, 6 } };
+            // 3x2 from 1D array with stride gap: logical elements are [1, 2, 3, 4, 5, 6]
+            yield return new object[] { new int[] { 1, 2, 99, 3, 4, 99, 5, 6, 99 }, new nint[] { 3, 2 }, new nint[] { 3, 1 }, new int[] { 1, 2, 3, 4, 5, 6 } };
+            // Length-2 1D tensor from a 1D array with a larger stride gap
+            yield return new object[] { new int[] { 42, 99, 99, 99, 7, 99, 99, 99 }, new nint[] { 2 }, new nint[] { 4 }, new int[] { 42, 7 } };
+        }
+
+        [Theory]
+        [MemberData(nameof(NonDenseTensorData))]
+        public static void TensorSequenceEqualNonDenseTests(int[] data, nint[] shape, nint[] strides, int[] expectedLogical)
+        {
+            var ts1 = new ReadOnlyTensorSpan<int>(data, shape, strides);
+            Assert.False(ts1.IsDense);
+
+            // Non-dense vs non-dense with same logical elements but different gap values
+            int[] data2 = (int[])data.Clone();
+            for (int i = 0; i < data2.Length; i++)
+            {
+                if (data2[i] == 99)
+                {
+                    data2[i] = 77;
+                }
+            }
+            var ts2 = new ReadOnlyTensorSpan<int>(data2, shape, strides);
+            Assert.False(ts2.IsDense);
+            Assert.True(ts1.SequenceEqual(ts2));
+
+            // Non-dense vs dense with same logical elements
+            var tsDense = new ReadOnlyTensorSpan<int>(expectedLogical, shape);
+            Assert.True(tsDense.IsDense);
+            Assert.True(ts1.SequenceEqual(tsDense));
+            Assert.True(tsDense.SequenceEqual(ts1));
+
+            // The TensorSpan overload also works
+            var tspan1 = new TensorSpan<int>(data, shape, strides);
+            Assert.True(tspan1.SequenceEqual(ts2));
+
+            // A differing logical element should return false
+            int[] data3 = (int[])data.Clone();
+            data3[0] = data3[0] + 1;
+            var ts3 = new ReadOnlyTensorSpan<int>(data3, shape, strides);
+            Assert.False(ts1.SequenceEqual(ts3));
+        }
+
+        [Theory]
+        [MemberData(nameof(NonDenseTensorData))]
+        public static void TensorFillGaussianNormalDistributionNonDenseTests(int[] data, nint[] shape, nint[] strides, int[] expectedLogical)
+        {
+            _ = expectedLogical;
+            double[] dblData = new double[data.Length];
+            var ts = new TensorSpan<double>(dblData, shape, strides);
+            Assert.False(ts.IsDense);
+
+            Tensor.FillGaussianNormalDistribution(ts, new Random(42));
+
+            // All logical elements should be filled
+            foreach (double val in ts)
+            {
+                Assert.NotEqual(0.0, val);
+            }
+
+            // Gap positions should remain untouched (zero)
+            HashSet<int> logicalOffsets = new HashSet<int>();
+            for (nint i = 0; i < ts.FlattenedLength; i++)
+            {
+                // Compute the i-th logical element's linear offset
+                nint offset = 0;
+                nint remaining = i;
+                for (int d = shape.Length - 1; d >= 0; d--)
+                {
+                    nint dimIndex = remaining % shape[d];
+                    remaining /= shape[d];
+                    offset += dimIndex * strides[d];
+                }
+                logicalOffsets.Add((int)offset);
+            }
+
+            for (int i = 0; i < dblData.Length; i++)
+            {
+                if (!logicalOffsets.Contains(i))
+                {
+                    Assert.Equal(0.0, dblData[i]);
+                }
+            }
+        }
+
+        [Theory]
+        [MemberData(nameof(NonDenseTensorData))]
+        public static void TensorFillUniformDistributionNonDenseTests(int[] data, nint[] shape, nint[] strides, int[] expectedLogical)
+        {
+            _ = expectedLogical;
+            double[] dblData = new double[data.Length];
+            var ts = new TensorSpan<double>(dblData, shape, strides);
+            Assert.False(ts.IsDense);
+
+            Tensor.FillUniformDistribution(ts, new Random(42));
+
+            // All logical elements should be filled with values in [0, 1)
+            foreach (double val in ts)
+            {
+                Assert.InRange(val, 0.0, 1.0);
+                Assert.NotEqual(0.0, val);
+            }
+
+            // Gap positions should remain untouched (zero)
+            HashSet<int> logicalOffsets = new HashSet<int>();
+            for (nint i = 0; i < ts.FlattenedLength; i++)
+            {
+                nint offset = 0;
+                nint remaining = i;
+                for (int d = shape.Length - 1; d >= 0; d--)
+                {
+                    nint dimIndex = remaining % shape[d];
+                    remaining /= shape[d];
+                    offset += dimIndex * strides[d];
+                }
+                logicalOffsets.Add((int)offset);
+            }
+
+            for (int i = 0; i < dblData.Length; i++)
+            {
+                if (!logicalOffsets.Contains(i))
+                {
+                    Assert.Equal(0.0, dblData[i]);
+                }
+            }
+        }
+
+        [Theory]
+        [MemberData(nameof(NonDenseTensorData))]
+        public static void TensorIndexOfMaxNonDenseTests(int[] data, nint[] shape, nint[] strides, int[] expectedLogical)
+        {
+            _ = expectedLogical;
+            // Clone the data; gap values are never enumerated, so only the logical elements matter
+            int[] testData = (int[])data.Clone();
+
+            // Compute the number of logical elements
+            nint flatLen = 1;
+            foreach (nint s in shape)
+            {
+                flatLen *= s;
+            }
+
+            // Place the maximum value at the last logical position
+            nint lastOffset = 0;
+            nint rem = flatLen - 1;
+            for (int d = shape.Length - 1; d >= 0; d--)
+            {
+                nint dimIndex = rem % shape[d];
+                rem /= shape[d];
+                lastOffset += dimIndex * strides[d];
+            }
+            testData[(int)lastOffset] = 9999;
+
+            var ts = new ReadOnlyTensorSpan<int>(testData, shape, strides);
+            Assert.False(ts.IsDense);
+
+            nint idx = Tensor.IndexOfMax(ts);
+            Assert.Equal(flatLen - 1, idx);
+        }
+
+        [Theory]
+        [MemberData(nameof(NonDenseTensorData))]
+        public static void TensorIndexOfMinNonDenseTests(int[] data, nint[] shape, nint[] strides, int[] expectedLogical)
+        {
+            _ = expectedLogical;
+            // Set all logical elements to positive values, then put the minimum at the last logical position
+            int[] testData = new int[data.Length];
+            Array.Fill(testData, 50);
+
+            nint flatLen = 1;
+            foreach (nint s in shape)
+            {
+                flatLen *= s;
+            }
+
+            // Set all logical positions to values > 0
+            for (nint i = 0; i < flatLen; i++)
+            {
+                nint offset = 0;
+                nint remaining = i;
+                for (int d = shape.Length - 1; d >= 0; d--)
+                {
+                    nint dimIndex = remaining % shape[d];
+                    remaining /= shape[d];
+                    offset += dimIndex * strides[d];
+                }
+                testData[(int)offset] = (int)(i + 10);
+            }
+
+            // Place the minimum at the last logical position
+            nint lastOffset = 0;
+            nint rem = flatLen - 1;
+            for (int d = shape.Length - 1; d >= 0; d--)
+            {
+                nint dimIndex = rem % shape[d];
+                rem /= shape[d];
+                lastOffset += dimIndex * strides[d];
+            }
+            testData[(int)lastOffset] = -1;
+
+            var ts = new ReadOnlyTensorSpan<int>(testData, shape, strides);
+            Assert.False(ts.IsDense);
+
+            nint idx = Tensor.IndexOfMin(ts);
+            Assert.Equal(flatLen - 1, idx);
+        }
+
+        [Theory]
+        [MemberData(nameof(NonDenseTensorData))]
+        public static void TensorIndexOfMaxMagnitudeNonDenseTests(int[] data, nint[] shape, nint[] strides, int[] expectedLogical)
+        {
+            _ = expectedLogical;
+            int[] testData = new int[data.Length];
+            Array.Fill(testData, 1);
+
+            nint flatLen = 1;
+            foreach (nint s in shape)
+            {
+                flatLen *= s;
+            }
+
+            // Set all logical positions to small values
+            for (nint i = 0; i < flatLen; i++)
+            {
+                nint offset = 0;
+                nint remaining = i;
+                for (int d = shape.Length - 1; d >= 0; d--)
+                {
+                    nint dimIndex = remaining % shape[d];
+                    remaining /= shape[d];
+                    offset += dimIndex * strides[d];
+                }
+                testData[(int)offset] = (int)(i + 1);
+            }
+
+            // Place the max magnitude at the last logical position
+            nint lastOffset = 0;
+            nint rem = flatLen - 1;
+            for (int d = shape.Length - 1; d >= 0; d--)
+            {
+                nint dimIndex = rem % shape[d];
+                rem /= shape[d];
+                lastOffset += dimIndex * strides[d];
+            }
+            testData[(int)lastOffset] = -9999;
+
+            var ts = new ReadOnlyTensorSpan<int>(testData, shape, strides);
+            Assert.False(ts.IsDense);
+
+            nint idx = Tensor.IndexOfMaxMagnitude(ts);
+            Assert.Equal(flatLen - 1, idx);
+        }
+
+        [Theory]
+        [MemberData(nameof(NonDenseTensorData))]
+        public static void TensorIndexOfMinMagnitudeNonDenseTests(int[] data, nint[] shape, nint[] strides, int[] expectedLogical)
+        {
+            _ = expectedLogical;
+            int[] testData = new int[data.Length];
+            Array.Fill(testData, 100);
+
+            nint flatLen = 1;
+            foreach (nint s in shape)
+            {
+                flatLen *= s;
+            }
+
+            // Set all logical positions to large magnitude values
+            for (nint i = 0; i < flatLen; i++)
+            {
+                nint offset = 0;
+                nint remaining = i;
+                for (int d = shape.Length - 1; d >= 0; d--)
+                {
+                    nint dimIndex = remaining % shape[d];
+                    remaining /= shape[d];
+                    offset += dimIndex * strides[d];
+                }
+                testData[(int)offset] = (int)(i + 100);
+            }
+
+            // Place the min magnitude at the first logical position
+            testData[0] = 0;
+
+            var ts = new ReadOnlyTensorSpan<int>(testData, shape, strides);
+            Assert.False(ts.IsDense);
+
+            nint idx = Tensor.IndexOfMinMagnitude(ts);
+            Assert.Equal(0, idx);
+        }
+
+        [Theory]
+        [MemberData(nameof(NonDenseTensorData))]
+        public static void TensorResizeToNonDenseSourceTests(int[] data, nint[] shape, nint[] strides, int[] expectedLogical)
+        {
+            var src = new ReadOnlyTensorSpan<int>(data, shape, strides);
+            Assert.False(src.IsDense);
+
+            // Resize to a larger dense destination
+            nint srcFlatLen = src.FlattenedLength;
+            int[] dstData = new int[(int)srcFlatLen + 2];
+            var dst = new TensorSpan<int>(dstData, [(nint)dstData.Length], [1]);
+            Assert.True(dst.IsDense);
+
+            Tensor.ResizeTo(src, dst);
+
+            for (int i = 0; i < expectedLogical.Length; i++)
+            {
+                Assert.Equal(expectedLogical[i], dstData[i]);
+            }
+            // Extra positions should be zero-filled
+            for (int i = expectedLogical.Length; i < dstData.Length; i++)
+            {
+                Assert.Equal(0, dstData[i]);
+            }
+        }
+
+        [Theory]
+        [MemberData(nameof(NonDenseTensorData))]
+        public static void TensorResizeToNonDenseDestinationTests(int[] data, nint[] shape, nint[] strides, int[] expectedLogical)
+        {
+            // Create a dense source with known values
+            var src = new ReadOnlyTensorSpan<int>(expectedLogical, [(nint)expectedLogical.Length], [1]);
+            Assert.True(src.IsDense);
+
+            // Create a non-dense destination
+            int[] dstData = new int[data.Length];
+            var dst = new TensorSpan<int>(dstData, shape, strides);
+            Assert.False(dst.IsDense);
+
+            Tensor.ResizeTo(src, dst);
+
+            // Verify logical elements were written correctly
+            int logicalIdx = 0;
+            foreach (int val in dst)
+            {
+                if (logicalIdx < expectedLogical.Length)
+                {
+                    Assert.Equal(expectedLogical[logicalIdx], val);
+                }
+                logicalIdx++;
+            }
+        }
+
+        [Fact]
+        public static void TensorResizeNonDenseTests()
+        {
+            // 2x3 tensor, slice to 2x2 (non-dense), then resize
+            Tensor<int> tensor = Tensor.Create<int>([10, 20, 30, 40, 50, 60], [2, 3]);
+            Tensor<int> sliced = tensor.Slice(0..2, 0..2);
+            Assert.False(sliced.IsDense);
+
+            Tensor<int> resized = Tensor.Resize(sliced, [3]);
+            Assert.Equal(3, resized.FlattenedLength);
+            Assert.Equal(10, resized[0]);
+            Assert.Equal(20, resized[1]);
+            Assert.Equal(40, resized[2]);
+
+            // 3x4 tensor, slice to 2x2 (non-dense), then resize larger
+            Tensor<int> tensor2 = Tensor.Create(Enumerable.Range(1, 12).ToArray(), [3, 4]);
+            Tensor<int> sliced2 = tensor2.Slice(0..2, 0..2);
+            Assert.False(sliced2.IsDense);
+
+            Tensor<int> resized2 = Tensor.Resize(sliced2, [6]);
+            Assert.Equal(6, resized2.FlattenedLength);
+            Assert.Equal(1, resized2[0]);
+            Assert.Equal(2, resized2[1]);
+            Assert.Equal(5, resized2[2]);
+            Assert.Equal(6, resized2[3]);
+            Assert.Equal(0, resized2[4]);
+            Assert.Equal(0, resized2[5]);
+        }
+
         [Fact]
         public static void TensorMultiplyTests()
         {