-
Notifications
You must be signed in to change notification settings - Fork 15
/
Copy pathTTNNOps.td
1332 lines (1075 loc) · 40.5 KB
/
TTNNOps.td
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
// SPDX-FileCopyrightText: (c) 2024 Tenstorrent AI ULC
//
// SPDX-License-Identifier: Apache-2.0
#ifndef TTMLIR_TTMLIR_DIALECT_TTNN_TTNNOPS_TD
#define TTMLIR_TTMLIR_DIALECT_TTNN_TTNNOPS_TD
include "ttmlir/Dialect/TT/IR/TTOpsTypes.td"
include "ttmlir/Dialect/TTNN/IR/TTNNOpsAttrs.td"
include "ttmlir/Dialect/TTNN/IR/TTNNBase.td"
include "ttmlir/Dialect/TTNN/IR/TTNNOpsTypes.td"
include "ttmlir/Dialect/TTNN/IR/TTNNOpsEnums.td"
include "mlir/Interfaces/InferTypeOpInterface.td"
include "mlir/Interfaces/DestinationStyleOpInterface.td"
include "mlir/Interfaces/ControlFlowInterfaces.td"
include "mlir/Interfaces/SideEffectInterfaces.td"
include "mlir/IR/CommonTypeConstraints.td"
include "mlir/IR/CommonAttrConstraints.td"
// Returns a handle to the runtime device. The optional `mesh_shape` attribute
// selects a device-mesh configuration when present.
def TTNN_GetDeviceOp : TTNN_Op<"get_device"> {
  let summary = "Get Device op.";
  let description = [{
    This op returns the current runtime device.
  }];

  let arguments = (ins OptionalAttr<TTNN_MeshShapeAttr>:$mesh_shape);
  let results = (outs TT_Device:$device);
}
// Converts a tensor's memory configuration (buffer type / sharding) to the
// one described by `memory_config`. Verified (hasVerifier = 1).
def TTNN_ToMemoryConfigOp : TTNN_Op<"to_memory_config"> {
  let summary = "ToMemoryConfig op.";
  let description = [{
    This op converts the memory config of the input tensor based on the given memory config.
    It handles:
      - Dram to L1
      - L1 to Dram
      - Interleaved to sharded
      - Sharded to interleaved
      - Sharded to sharded (reshard)
  }];

  let arguments = (ins AnyRankedTensor:$input,
                       TTNN_MemoryConfigAttr:$memory_config);
  let results = (outs AnyRankedTensor:$result);

  let hasVerifier = 1;
}
// Aggregate layout-change op consumed by the optimizer and later decomposed
// into concrete memory/layout ops. Only `layout` is required; the remaining
// attributes/operands are optional (see description). Has a custom
// canonicalization (hasCanonicalizeMethod = 1).
def TTNN_ToLayoutOp : TTNN_Op<"to_layout"> {
  let summary = "ToLayout op.";
  let description = [{
    This op wraps all layout information gathered from ttir.toLayout. It is used/updated by the optimizer
    to perform optimizations, and later broken down into specific memory/layout operations (toDevice, toMemoryConfig etc.).
    Currently in the TTNN backend, we use this op solely for tilize/untilize, therefore marking all other attrs as optional.
    Once ttnn::to_layout supports other attrs, we can remove the optional tag.
  }];

  let arguments = (ins AnyRankedTensor:$input,
                       TTNN_LayoutAttr:$layout,
                       OptionalAttr<TT_DataTypeAttr>:$dtype,
                       OptionalAttr<TTNN_MemoryConfigAttr>:$memory_config,
                       Optional<TT_Device>:$device);
  let results = (outs AnyRankedTensor:$result);

  let hasCanonicalizeMethod = 1;
}
// ----- Data-type and host/device movement ops -----

// Casts the input tensor's elements to `dtype`.
def TTNN_TypecastOp : TTNN_Op<"typecast"> {
  let summary = "Typecast op.";
  let description = [{
    This op converts the data type of the input tensor based on the given data type.
    It handles:
      - conversions of data types.
  }];

  let arguments = (ins AnyRankedTensor:$input,
                       TT_DataTypeAttr:$dtype);
  let results = (outs AnyRankedTensor:$result);
}

// Moves the input tensor onto `device`, optionally with a memory config.
def TTNN_ToDeviceOp : TTNN_Op<"to_device"> {
  let summary = "ToDevice op.";
  let description = [{
    This op sends the input tensor to the given device with the given memory config.
  }];

  let arguments = (ins AnyRankedTensor:$input,
                       TT_Device:$device,
                       OptionalAttr<TTNN_MemoryConfigAttr>:$memory_config);
  let results = (outs AnyRankedTensor:$result);
}

// Moves the input tensor back from device to host.
def TTNN_FromDeviceOp : TTNN_Op<"from_device"> {
  let summary = "FromDevice op.";
  let description = [{
    This op retrieves the input tensor from the given device.
  }];

  let arguments = (ins AnyRankedTensor:$input);
  let results = (outs AnyRankedTensor:$result);
}
// ----- Base classes for destination-passing-style (DPS) and elementwise ops -----

// Base class for ops implementing DestinationStyleOpInterface; DPS inits are
// mapped to the op's $outputs operands.
class TTNN_NamedDPSOp<string mnemonic, list<Trait> traits = []> :
    TTNN_Op<mnemonic, !listconcat(traits, [DestinationStyleOpInterface])> {
  let extraClassDeclaration = [{
    MutableOperandRange getDpsInitsMutable() { return getOutputsMutable(); }
  }];
}

// Common arguments/results for elementwise ops: variadic inputs and variadic
// DPS outputs (AttrSizedOperandSegments separates the two variadic groups).
class TTNN_ElementwiseOp<string mnemonic, list<Trait> traits = []> :
    TTNN_NamedDPSOp<mnemonic, !listconcat(traits, [AttrSizedOperandSegments])> {
  let arguments = (ins Variadic<AnyRankedTensor>:$inputs,
                       Variadic<AnyRankedTensor>:$outputs);
  let results = (outs Variadic<AnyRankedTensor>:$results);
}

// Unary elementwise base: builder takes (in, out) and derives the result type
// from `out`.
class TTNN_ElementwiseUnaryOp<string mnemonic, list<Trait> traits = []> :
    TTNN_ElementwiseOp<mnemonic, traits> {
  let summary = "Eltwise unary op.";
  let description = [{
    Eltwise unary op.
  }];

  let builders =
  [
    OpBuilder<(ins "Value": $in, "Value": $out),
    [{
      build($_builder, $_state, {out.getType()}, in, out);
    }]>
  ];
}

// Binary elementwise base: builder takes (lhs, rhs, out).
class TTNN_ElementwiseBinaryOp<string mnemonic, list<Trait> traits = []> :
    TTNN_ElementwiseOp<mnemonic, traits> {
  let summary = "Eltwise binary op.";
  let description = [{
    Eltwise binary op.
  }];

  let builders =
  [
    OpBuilder<(ins "Value": $lhs, "Value": $rhs, "Value": $out),
    [{
      build($_builder, $_state, {out.getType()}, {lhs, rhs}, out);
    }]>
  ];
}

// Ternary elementwise base: builder takes (first, second, third, out).
class TTNN_ElementwiseTernaryOp<string mnemonic, list<Trait> traits = []> :
    TTNN_ElementwiseOp<mnemonic, traits> {
  let summary = "Eltwise ternary op.";
  let description = [{
    Eltwise ternary op.
  }];

  let builders =
  [
    OpBuilder<(ins "Value": $first, "Value": $second, "Value": $third, "Value": $out),
    [{
      build($_builder, $_state, {out.getType()}, {first, second, third}, out);
    }]>
  ];
}
// ----- Elementwise ternary and unary ops -----

def TTNN_WhereOp : TTNN_ElementwiseTernaryOp<"where"> {
  let summary = "Eltwise where.";
  let description = [{
    Eltwise where operation.
  }];
}

def TTNN_AbsOp : TTNN_ElementwiseUnaryOp<"abs"> {
  let summary = "Eltwise absolute.";
  let description = [{
    Eltwise absolute operation.
  }];
}

def TTNN_CbrtOp : TTNN_ElementwiseUnaryOp<"cbrt"> {
  let summary = "Eltwise cubic root.";
  let description = [{
    Eltwise cubic root operation.
  }];
}

def TTNN_CeilOp : TTNN_ElementwiseUnaryOp<"ceil"> {
  let summary = "Eltwise ceil.";
  let description = [{
    Eltwise ceil operation.
  }];
}

def TTNN_SignOp: TTNN_ElementwiseUnaryOp<"sign"> {
  let summary = "Eltwise sign operation.";
  let description = [{
    Returns the sign of the `operand` element-wise and produces a `result`
    tensor.

    Example:
      %a: [[3, -2, 0], [1, -4, 4]]
      "ttnn.sign"(%a, %out) -> %out: [[1, -1, 0], [1, -1, 1]]
  }];
}

def TTNN_CosOp : TTNN_ElementwiseUnaryOp<"cos"> {
  let summary = "Eltwise cosine.";
  let description = [{
    Eltwise cosine operation.
  }];
}

def TTNN_ExpOp : TTNN_ElementwiseUnaryOp<"exp"> {
  let summary = "Eltwise exponential.";
  let description = [{
    Eltwise exponential operation.
  }];
}

def TTNN_FloorOp: TTNN_ElementwiseUnaryOp<"floor"> {
  let summary = "Eltwise floor op.";
  let description = [{
    Eltwise floor operation.
  }];
}

def TTNN_GeluOp: TTNN_ElementwiseUnaryOp<"gelu"> {
  let summary = "Eltwise GELU.";
  let description = [{
    Eltwise GELU operation.
  }];
}

def TTNN_IsFiniteOp: TTNN_ElementwiseUnaryOp<"isfinite"> {
  let summary = "Eltwise isfinite op.";
  let description = [{
    Eltwise isfinite operation.
  }];
}

def TTNN_LogicalNotOp: TTNN_ElementwiseUnaryOp<"logical_not"> {
  let summary = "Eltwise logical not op.";
  let description = [{
    Eltwise logical not operation.
  }];
}
// Element-wise bitwise NOT on integer tensors.
def TTNN_BitwiseNotOp : TTNN_ElementwiseUnaryOp<"bitwise_not"> {
  let summary = "Eltwise bitwise NOT.";
  let description = [{
    Performs element-wise NOT of tensor `operand` and produces a `result` tensor.

    Example:
      // Bitwise operation with integer tensors
      // %operand: [[1, 2], [3, 4]]
      %result = "ttnn.bitwise_not"(%operand) : (tensor<2x2xi32>) -> tensor<2x2xi32>
      // %result: [[-2, -3], [-4, -5]]
  }];
}
def TTNN_NegOp : TTNN_ElementwiseUnaryOp<"neg"> {
  let summary = "Eltwise negate.";
  let description = [{
    Eltwise negate operation.
  }];
}

def TTNN_TanOp: TTNN_ElementwiseUnaryOp<"tan"> {
  let summary = "Eltwise tan op.";
  let description = [{
    Eltwise tan operation.
  }];
}

def TTNN_TanhOp: TTNN_ElementwiseUnaryOp<"tanh"> {
  let summary = "Eltwise tanh op.";
  let description = [{
    Eltwise tanh operation.
  }];
}

def TTNN_ReciprocalOp : TTNN_ElementwiseUnaryOp<"reciprocal"> {
  let summary = "Eltwise reciprocal.";
  let description = [{
    Eltwise reciprocal operation.
  }];
}

// Relu additionally implements TTNN_OpModelInterface (constraints/runtime
// queries used by the optimizer).
def TTNN_ReluOp : TTNN_ElementwiseUnaryOp<"relu",
      [DeclareOpInterfaceMethods<TTNN_OpModelInterface, ["getOpConstraints", "getOpRuntime"]>]
    > {
  let summary = "Eltwise ReLU.";
  let description = [{
    Eltwise ReLU operation.
  }];
}

def TTNN_SinOp : TTNN_ElementwiseUnaryOp<"sin"> {
  let summary = "Eltwise sine.";
  let description = [{
    Eltwise sine operation.
  }];
}

def TTNN_SqrtOp : TTNN_ElementwiseUnaryOp<"sqrt"> {
  let summary = "Eltwise sqrt.";
  let description = [{
    Eltwise sqrt operation.
  }];
}

def TTNN_RsqrtOp : TTNN_ElementwiseUnaryOp<"rsqrt"> {
  let summary = "Eltwise rsqrt.";
  let description = [{
    Eltwise rsqrt operation.
  }];
}

def TTNN_SigmoidOp : TTNN_ElementwiseUnaryOp<"sigmoid"> {
  let summary = "Eltwise sigmoid.";
  let description = [{
    Eltwise sigmoid operation.
  }];
}

def TTNN_LogOp : TTNN_ElementwiseUnaryOp<"log"> {
  let summary = "Eltwise logarithm.";
  let description = [{
    Eltwise logarithm operation.
  }];
}
// Element-wise log(1 + x).
def TTNN_Log1pOp: TTNN_ElementwiseUnaryOp<"log1p"> {
  let summary = "Eltwise log1p operation.";
  let description = [{
    Performs element-wise logarithm plus one operation on `operand` tensor and
    puts the result in the output tensor.

    Example:
      %a: [0.0, -0.999, 7.0, 6.38905621, 15.0]
      "ttnn.log1p"(%a, %out) -> %out: [0.0, -6.90776825, 2.07944155, 2.0, 2.77258873]
  }];
}
// Element-wise exp(x) - 1.
def TTNN_Expm1Op: TTNN_ElementwiseUnaryOp<"expm1"> {
  let summary = "Eltwise expm1 operation.";
  let description = [{
    Performs element-wise exponential minus one operation on `operand` tensor
    and stores the result in the output tensor.

    Example:
      %a: [[0, 1], [0, 0]]
      "ttnn.expm1"(%a, %out) -> %out: [[0, 1.71828], [0, 0]]
  }];
}
// Unary elementwise base carrying an extra required F32 `parameter` attribute;
// builder takes (in, out, parameter).
class TTNN_ElementwiseUnaryWithFloatParameterOp<string mnemonic, list<Trait> traits = []> :
    TTNN_ElementwiseUnaryOp<mnemonic, traits> {
  let summary = "Eltwise unary op with the float parameter.";
  let description = [{
    Eltwise unary op with the float parameter.
  }];

  let arguments = (ins Variadic<AnyRankedTensor>:$inputs,
                       Variadic<AnyRankedTensor>:$outputs,
                       F32Attr:$parameter);

  let builders =
  [
    OpBuilder<(ins "Value": $in, "Value": $out, "FloatAttr":$parameter),
    [{
      build($_builder, $_state, {out.getType()}, {in}, {out}, parameter);
    }]>
  ];
}

def TTNN_LeakyReluOp : TTNN_ElementwiseUnaryWithFloatParameterOp<"leaky_relu"> {
  let summary = "Eltwise leaky relu operation.";
  let description = [{
    The Leaky ReLU (Rectified Linear Unit) operation computes an element-wise
    activation function over its input tensor. It is defined as:

      y = x if x > 0
      y = parameter * x if x <= 0

    where `parameter` is a small, user-defined constant that determines the slope for
    negative inputs.

    Attributes:
      - `parameter` (float): The slope for negative values.

    Inputs:
      - `input` (Tensor): The input tensor to be activated.

    Outputs:
      - `output` (Tensor): The tensor after applying the Leaky ReLU activation.
  }];
}
// ----- Elementwise binary ops -----

// Add additionally implements TTNN_OpModelInterface (constraints/runtime
// queries used by the optimizer).
def TTNN_AddOp : TTNN_ElementwiseBinaryOp<"add",
      [DeclareOpInterfaceMethods<TTNN_OpModelInterface, ["getOpConstraints", "getOpRuntime"]>]
    > {
  let summary = "Eltwise add.";
  let description = [{
    Eltwise add operation.
  }];
}

def TTNN_DivOp : TTNN_ElementwiseBinaryOp<"div"> {
  let summary = "Eltwise divide.";
  let description = [{
    Eltwise divide operation.
  }];
}

def TTNN_EqualOp : TTNN_ElementwiseBinaryOp<"eq"> {
  let summary = "Eltwise equal to.";
  let description = [{
    Eltwise equal to operation.
  }];
}

def TTNN_NotEqualOp : TTNN_ElementwiseBinaryOp<"ne"> {
  let summary = "Eltwise not equal to.";
  let description = [{
    Eltwise not equal to operation.
  }];
}

def TTNN_GreaterEqualOp : TTNN_ElementwiseBinaryOp<"ge"> {
  let summary = "Eltwise greater than or equal to.";
  let description = [{
    Eltwise greater than or equal to operation.
  }];
}

def TTNN_GreaterThanOp : TTNN_ElementwiseBinaryOp<"gt"> {
  let summary = "Eltwise greater than.";
  let description = [{
    Eltwise greater than operation.
  }];
}

def TTNN_LessEqualOp : TTNN_ElementwiseBinaryOp<"le"> {
  let summary = "Eltwise less than or equal to.";
  let description = [{
    Eltwise less than or equal to operation.
  }];
}

def TTNN_LessThanOp : TTNN_ElementwiseBinaryOp<"lt"> {
  let summary = "Eltwise less than.";
  let description = [{
    Eltwise less than operation.
  }];
}

def TTNN_LogicalAndOp : TTNN_ElementwiseBinaryOp<"logical_and"> {
  let summary = "Eltwise logical and.";
  let description = [{
    Eltwise logical and operation.
  }];
}

def TTNN_LogicalOrOp : TTNN_ElementwiseBinaryOp<"logical_or"> {
  let summary = "Eltwise logical or.";
  let description = [{
    Eltwise logical or operation.
  }];
}

def TTNN_LogicalXorOp : TTNN_ElementwiseBinaryOp<"logical_xor"> {
  let summary = "Eltwise logical xor.";
  let description = [{
    Eltwise logical xor operation.
  }];
}
// ----- Elementwise bitwise binary ops (integer tensors) -----

def TTNN_BitwiseAndOp : TTNN_ElementwiseBinaryOp<"bitwise_and"> {
  let summary = "Eltwise bitwise AND.";
  let description = [{
    Performs element-wise bitwise AND of two tensors `lhs` and `rhs`
    and produces a `result` tensor.

    Example:
      // %lhs: [[1, 2], [3, 4]]
      // %rhs: [[5, 6], [7, 8]]
      %result = "ttnn.bitwise_and"(%lhs, %rhs) : (tensor<2x2xi32>, tensor<2x2xi32>) -> tensor<2x2xi32>
      // %result: [[1, 2], [3, 0]]
  }];
}

def TTNN_BitwiseOrOp : TTNN_ElementwiseBinaryOp<"bitwise_or"> {
  let summary = "Eltwise bitwise OR.";
  let description = [{
    Performs element-wise bitwise OR of two tensors `lhs` and `rhs`
    and produces a `result` tensor.

    Example:
      // %lhs: [[1, 2], [3, 4]]
      // %rhs: [[5, 6], [7, 8]]
      %result = "ttnn.bitwise_or"(%lhs, %rhs) : (tensor<2x2xi32>, tensor<2x2xi32>) -> tensor<2x2xi32>
      // %result: [[5, 6], [7, 12]]
  }];
}

def TTNN_BitwiseXorOp : TTNN_ElementwiseBinaryOp<"bitwise_xor"> {
  let summary = "Eltwise bitwise XOR.";
  let description = [{
    Performs element-wise bitwise XOR of two tensors `lhs` and `rhs`
    and produces a `result` tensor.

    Example:
      // %lhs: [[1, 2], [3, 4]]
      // %rhs: [[5, 6], [7, 8]]
      %result = "ttnn.bitwise_xor"(%lhs, %rhs) : (tensor<2x2xi32>, tensor<2x2xi32>) -> tensor<2x2xi32>
      // %result: [[4, 4], [4, 12]]
  }];
}
def TTNN_MaximumOp : TTNN_ElementwiseBinaryOp<"maximum"> {
  let summary = "Eltwise maximum OP.";
  let description = [{
    Calculates maximum of input tensors' values element-wise and stores result in output tensor.

    Example:
      %lhs: [[3, 2, 7], [1, 4, 4]]
      %rhs: [[1, 4, 2], [1, 2, 3]]
      "ttnn.maximum"(%lhs, %rhs, %out) -> %out: [[3, 4, 7], [1, 4, 4]]
  }];
}

def TTNN_MinimumOp : TTNN_ElementwiseBinaryOp<"minimum"> {
  let summary = "Eltwise minimum OP.";
  let description = [{
    Calculates minimum of input tensors' values element-wise and stores result
    in output tensor.

    Example:
      %lhs: [[3, 2, 7], [1, 4, 4]]
      %rhs: [[1, 4, 2], [1, 2, 3]]
      "ttnn.minimum"(%lhs, %rhs, %out) -> %out: [[1, 2, 2], [1, 2, 3]]
  }];
}

def TTNN_MultiplyOp : TTNN_ElementwiseBinaryOp<"multiply"> {
  let summary = "Eltwise multiply.";
  let description = [{
    Eltwise multiply operation.
  }];
}

def TTNN_SubtractOp : TTNN_ElementwiseBinaryOp<"subtract"> {
  let summary = "Eltwise subtract.";
  let description = [{
    Eltwise subtract operation.
  }];
}

def TTNN_RemainderOp : TTNN_ElementwiseBinaryOp<"remainder"> {
  let summary = "Eltwise remainder.";
  let description = [{
    Performs element-wise remainder of dividend lhs and divisor rhs tensors and produces a
    result tensor.

    Example:
      // %lhs: [17, -17, 17, -17]
      // %rhs: [3, 3, -3, -3]
      %result = "ttnn.remainder"(%lhs, %rhs) : (tensor<4xi64>, tensor<4xi64>) -> tensor<4xi64>
      // %result: [2, -2, 2, -2]
  }];
}

def TTNN_PowerOp : TTNN_ElementwiseBinaryOp<"pow"> {
  let summary = "Eltwise power OP.";
  let description = [{
    Performs element-wise exponentiation of lhs tensor by rhs tensor and produces a
    result tensor. Tensors must be of same shape.

    Example:
    ```
    %result = "ttnn.pow"(%lhs, %rhs) : (tensor<6xf64>, tensor<6xf64>) -> tensor<6xf64>
    %lhs: [-2.0, -0.0, -36.0, 5.0, 3.0, 10000.0]
    %rhs: [2.0, 2.0, 1.1, 2.0, -1.0, 10.0]
    %result: [4.0, 0.0, -nan, 25.0, 0.333333343, inf]
    ```
  }];
}
// ----- Reduction ops -----

// Reduction base: reduces `input` along the optional `dim_arg` dimensions
// (full reduction when absent); `keep_dim` controls rank preservation.
// All reductions are verified (hasVerifier = 1).
class TTNN_ReductionOp<string mnemonic, list<Trait> traits = []> : TTNN_Op<mnemonic, traits> {
  let summary = "Reduction op.";
  let description = [{
    Reduction op.
  }];

  let arguments = (ins AnyRankedTensor:$input,
                       BoolAttr:$keep_dim,
                       OptionalAttr<I32ArrayAttr>:$dim_arg);
  let results = (outs AnyRankedTensor:$result);

  let hasVerifier = 1;
}
def TTNN_SumOp : TTNN_ReductionOp<"sum"> {
  let summary = "Sum reduction op.";
  let description = [{
    Sum reduction op.
  }];
}

def TTNN_MeanOp : TTNN_ReductionOp<"mean"> {
  let summary = "Mean reduction op.";
  let description = [{
    Mean reduction op.
  }];
}

def TTNN_MaxOp : TTNN_ReductionOp<"max"> {
  let summary = "Max reduction op.";
  let description = [{
    Max reduction op.
  }];
}

def TTNN_MinOp : TTNN_ReductionOp<"min"> {
  let summary = "Min reduction op.";
  let description = [{
    This op computes the minimum of all elements of the tensor or along
    specified dimension.

    Example:
      input: [[1, 5, 3],
              [4, 2, 6]]

      // Computing along dim 0
      output: [1, 2, 3]

      // Computing along dim 1
      output: [1, 2]

      // Computing for entire tensor
      output: 1
  }];
}
// Product reduction. Unlike the TTNN_ReductionOp family, this op takes a
// required scalar `dim_arg` plus an `all_dimensions` flag for full product.
def TTNN_ProdOp : TTNN_Op<"prod"> {
  let summary = "Product reduction op.";
  let description = [{
    This op computes the product of all elements of the tensor (full product)
    or along a specific dimension.

    Example:
      input: [[1, 2, 3],
              [4, 5, 6]]

      // Computing along dim 0
      output: [4, 10, 18]

      // Computing along dim 1
      output: [6, 120]

      // Computing full product
      output: 720
  }];

  let arguments = (ins AnyRankedTensor:$input,
                       BoolAttr:$all_dimensions,
                       BoolAttr:$keep_dim,
                       I64Attr:$dim_arg,
                       OptionalAttr<TTNN_MemoryConfigAttr>:$memory_config);
  let results = (outs AnyRankedTensor:$result);

  let hasVerifier = 1;
}
// Embedding lookup: gathers rows of `weight` indexed by `input`. Registers
// operand workarounds via the TTNNOperandsWorkaroundsFactory.
def TTNN_EmbeddingOp : TTNN_NamedDPSOp<"embedding"> {
  let summary = "Embedding op.";
  let description = [{
    Embedding operation.
  }];

  let arguments = (ins AnyRankedTensor:$input,
                       AnyRankedTensor:$weight,
                       AnyRankedTensor:$output);
  let results = (outs AnyRankedTensor:$result);

  let extraClassDeclaration = [{
    MutableOperandRange getDpsInitsMutable() { return getOutputMutable(); }
    wa::TTNNOperandsWorkarounds getOperandsWorkarounds() {
      return wa::TTNNOperandsWorkaroundsFactory::createEmbeddingOpOperandsWorkarounds();
    }
  }];

  let hasVerifier = 1;
}
// ----- In-place KV-cache ops (cache operand is marked MemWrite) -----

def TTNN_UpdateCacheOp : TTNN_InplaceOp<"update_cache"> {
  let summary = "Update static cache tensor.";
  let description = [{
    Updates the `cache` tensor in-place with values from `input` at `update_index` and `batch_offset`.
  }];

  let arguments = (ins Arg<AnyRankedTensor, "cache tensor", [MemWrite]>:$cache,
                       AnyRankedTensor:$input,
                       AnyRankedTensor:$update_index,
                       I32Attr:$batch_offset);

  let hasVerifier = 1;
}

def TTNN_FillCacheOp : TTNN_InplaceOp<"fill_cache"> {
  let summary = "Fill static cache tensor.";
  let description = [{
    Fills the `cache` tensor in-place with values from `input` at `batch_offset`.
  }];

  let arguments = (ins Arg<AnyRankedTensor, "cache tensor", [MemWrite]>:$cache,
                       AnyRankedTensor:$input,
                       I32Attr:$batch_offset);

  let hasVerifier = 1;
}
// Gradient of the embedding op w.r.t. its input; registers operand
// workarounds via the TTNNOperandsWorkaroundsFactory.
def TTNN_EmbeddingBackwardOp : TTNN_NamedDPSOp<"embedding_bw"> {
  let summary = "Embedding backward op.";
  let description = [{
    Embedding backward operation. Generates the gradient of the embedding operation with respect to the input.
  }];

  let arguments = (ins AnyRankedTensor:$input,
                       AnyRankedTensor:$weight,
                       AnyRankedTensor:$in_gradient,
                       OptionalAttr<TT_DataTypeAttr>:$dtype,
                       OptionalAttr<TTNN_MemoryConfigAttr>:$memory_config,
                       AnyRankedTensor:$output);
  let results = (outs AnyRankedTensor:$result);

  let extraClassDeclaration = [{
    MutableOperandRange getDpsInitsMutable() { return getOutputMutable(); }
    wa::TTNNOperandsWorkarounds getOperandsWorkarounds() {
      return wa::TTNNOperandsWorkaroundsFactory::createEmbeddingBackwardOpOperandsWorkarounds();
    }
  }];

  let hasVerifier = 1;
}
// Cumulative sum along `dim`; input-type-dependent operand workarounds are
// registered via the TTNNOperandsWorkaroundsFactory.
def TTNN_MorehCumSumOp : TTNN_NamedDPSOp<"moreh_cumsum"> {
  let summary = "Moreh cumulative sum op.";
  let description = [{
    Computes the cumulative sum of elements of a tensor along specified dimension.

    Example:
      input: [[1, 2, 3],
              [4, 5, 6]]

      // Cumulative sum along dim=0:
      output: [[1, 2, 3],
               [5, 7, 9]]

      // Cumulative sum along dim=1:
      output: [[1, 3, 6],
               [4, 9, 15]]
  }];

  let arguments = (ins AnyRankedTensor:$input,
                       I64Attr:$dim,
                       AnyRankedTensor:$output,
                       OptionalAttr<TTNN_MemoryConfigAttr>:$memory_config);
  let results = (outs AnyRankedTensor:$result);

  let extraClassDeclaration = [{
    MutableOperandRange getDpsInitsMutable() { return getOutputMutable(); }
    wa::TTNNOperandsWorkarounds getOperandsWorkarounds() {
      RankedTensorType inputType = getInput().getType();
      return wa::TTNNOperandsWorkaroundsFactory::createCumSumOpOperandsWorkarounds(inputType);
    }
  }];
}
// Softmax along `dimension`; also implements TTNN_OpModelInterface
// (constraints/runtime queries used by the optimizer).
def TTNN_SoftmaxOp : TTNN_Op<"softmax",
      [DeclareOpInterfaceMethods<TTNN_OpModelInterface, ["getOpConstraints", "getOpRuntime"]>]
    > {
  let summary = "Softmax op.";
  let description = [{
    Softmax operation.
  }];

  let arguments = (ins AnyRankedTensor:$input,
                       SI32Attr: $dimension);
  let results = (outs AnyRankedTensor:$result);

  let hasVerifier = 1;
}

// Swaps dimensions `dim0` and `dim1` of the input tensor.
def TTNN_TransposeOp : TTNN_Op<"transpose"> {
  let summary = "Transpose op.";
  let description = [{
    Transpose tensor along two given dimensions.
  }];

  let arguments = (ins AnyRankedTensor:$input,
                       SI32Attr:$dim0,
                       SI32Attr:$dim1);
  let results = (outs AnyRankedTensor:$result);

  let hasVerifier = 1;
}
def TTNN_RepeatInterleaveOp : TTNN_Op<"repeat_interleave"> {
  let summary = "Repeat interleave op.";
  let description = [{
    Repeats elements of a tensor along a specified dimension.
    It allows for flexible repetition patterns, where each element can be repeated a different number of times.
    This is particularly useful for tasks that require duplicating elements in a non-uniform manner.

    Parameters:
    - `input`: The input tensor.
    - `repeats`: Specifies the number of repetitions for each element, each element is repeated that number of times.
    - `dim`: The dimension along which to repeat values.
  }];

  let arguments = (ins AnyRankedTensor:$input,
                       UI32Attr:$repeats,
                       SI32Attr:$dim,
                       OptionalAttr<TTNN_MemoryConfigAttr>:$memory_config);
  let results = (outs AnyRankedTensor:$result);

  let hasVerifier = 1;
}

// Concatenates the variadic `inputs` along `dim` into the DPS `output`.
def TTNN_ConcatOp : TTNN_NamedDPSOp<"concat"> {
  let summary = "Concat op.";
  let description = [{
    Concat tensors along a given dimension.
  }];

  let arguments = (ins Variadic<AnyRankedTensor>:$inputs,
                       AnyRankedTensor:$output,
                       SI32Attr:$dim);
  let results = (outs AnyRankedTensor:$result);

  let extraClassDeclaration = [{
    MutableOperandRange getDpsInitsMutable() { return getOutputMutable(); }
  }];

  let hasVerifier = 1;
}
def TTNN_ReshapeOp : TTNN_Op<"reshape"> {
  let summary = "Reshape op.";
  let description = [{
    Reshape tensor.
  }];

  let arguments = (ins AnyRankedTensor:$input,
                       I32ArrayAttr:$shape);
  let results = (outs AnyRankedTensor:$result);

  let hasVerifier = 1;
}

def TTNN_RepeatOp : TTNN_Op<"repeat"> {
  let summary = "Repeat op.";
  let description = [{
    Returns a new tensor filled with repetition of input tensor according to number of times specified in repeat_dims.

    Parameters:
    - `input_tensor` (ttnn.Tensor): the input tensor.
    - `repeat_dims` (number): The number of repetitions for each element.
  }];

  let arguments = (ins AnyRankedTensor:$input,
                       TTNN_ShapeAttr:$repeat_dims);
  let results = (outs AnyRankedTensor:$result);

  let hasVerifier = 1;
}
def TTNN_SliceOp: TTNN_NamedDPSOp<"slice"> {
  let summary = "Slice op.";
  let description = [{
    Extract a portion of a tensor based on the specified start (`begins`), stop (`ends`), and step
    indices for each dimension.
  }];

  let arguments = (ins AnyRankedTensor:$input,
                       AnyRankedTensor:$output,
                       I32ArrayAttr:$begins,
                       I32ArrayAttr:$ends,
                       I32ArrayAttr:$step);
  let results = (outs AnyRankedTensor:$result);

  let extraClassDeclaration = [{
    MutableOperandRange getDpsInitsMutable() { return getOutputMutable(); }
  }];

  let hasVerifier = 1;
}
// Matmul of `a` and `b` with an optional `bias` addition, written to the DPS
// `output` operand.
def TTNN_LinearOp : TTNN_NamedDPSOp<"linear"> {
  let summary = "Linear transformation of inputs.";
  let description = [{
    Produces the matmul of tensors `a` and `b` with optional addition with `bias`.

    Example:
      // %a = [[1., 2.], [2., 1.]]
      // %b = [[0., 1.], [1., 0.]]
      // %bias = [[1.]]
      "ttnn.linear"(%a, %b, %bias, %result) : (tensor<2x2xf16>, tensor<2x2xf16>, tensor<1xf16>, tensor<2x2xf16>) -> tensor<2x2xf16>
      // %result = [[3., 2.], [2., 3.]]
  }];

  let arguments = (ins AnyRankedTensor:$a,
                       AnyRankedTensor:$b,
                       Optional<AnyRankedTensor>:$bias,
                       AnyRankedTensor:$output);
  let results = (outs AnyRankedTensor:$result);

  let extraClassDeclaration = [{
    MutableOperandRange getDpsInitsMutable() { return getOutputMutable(); }
  }];

  let hasVerifier = 1;
}
// ANCHOR: adding_an_op_matmul_ttnn
// Matmul of `a` and `b` written to the DPS `output` operand; also implements
// TTNN_OpModelInterface (constraints/runtime queries used by the optimizer).
def TTNN_MatmulOp : TTNN_NamedDPSOp<"matmul",
      [DeclareOpInterfaceMethods<TTNN_OpModelInterface, ["getOpConstraints", "getOpRuntime"]>]
    > {
  let summary = "Matmul op.";
  let description = [{
    Produces the matmul of tensors `a` and `b` and stores the result in the
    output tensor.
  }];

  let arguments = (ins AnyRankedTensor:$a,
                       AnyRankedTensor:$b,
                       AnyRankedTensor:$output);
  let results = (outs AnyRankedTensor:$result);

  let extraClassDeclaration = [{
    MutableOperandRange getDpsInitsMutable() { return getOutputMutable(); }
  }];

  let hasVerifier = 1;
}
// ANCHOR_END: adding_an_op_matmul_ttnn
// 2D convolution: all geometry (channels, batch, spatial sizes, kernel,
// stride, padding, dilation, groups) is carried as explicit I32 attributes.
def TTNN_Conv2dOp : TTNN_NamedDPSOp<"conv2d"> {
  let summary = "Conv2d operation.";
  let description = [{
    Applies a 2D convolution over an input image composed of several input planes.
  }];

  let arguments = (ins AnyRankedTensor:$input,
                       AnyRankedTensor:$weight,
                       Optional<AnyRankedTensor>:$bias,
                       AnyRankedTensor:$output,
                       TT_Device:$device,
                       I32Attr:$in_channels,
                       I32Attr:$out_channels,
                       I32Attr:$batch_size,
                       I32Attr:$input_height,
                       I32Attr:$input_width,
                       I32Attr:$kernel_height,
                       I32Attr:$kernel_width,
                       I32Attr:$stride_height,
                       I32Attr:$stride_width,
                       I32Attr:$padding_height,
                       I32Attr:$padding_width,
                       I32Attr:$dilation_height,
                       I32Attr:$dilation_width,
                       I32Attr:$groups);
  let results = (outs AnyRankedTensor:$result);

  let extraClassDeclaration = [{
    MutableOperandRange getDpsInitsMutable() { return getOutputMutable(); }
  }];

  let hasVerifier = 1;
}
def TTNN_ConvTranspose2dOp : TTNN_NamedDPSOp<"conv_transpose2d"> {
let summary = "ConvTranspose2d operation.";
let description = [{