From 9b348340da551e94af503661b339ee8d42122087 Mon Sep 17 00:00:00 2001 From: Philippe Tillet Date: Wed, 21 Dec 2022 01:30:50 -0800 Subject: [PATCH] Merge `triton-mlir` branch - Complete rewrite of the backend from scratch (#1004) This PR merges the `triton-mlir` branch, in which we have been quietly rewriting the Triton backend from scratch to increase maintainability, stability and ultimately performance. Changes to the runtime are minimal, and this new version aims to remain backward-compatible with the previous commit. The legacy backend is now officially deprecated, but can still be accessed via the `legacy-backend` tag. Co-authored-by: Keren Zhou Co-authored-by: Yan Chunwei Co-authored-by: goostavz <109190422+goostavz@users.noreply.github.com> Co-authored-by: Shintaro Iwasaki Co-authored-by: Yan Da Co-authored-by: Jun Yang Co-authored-by: Ian Bearman Co-authored-by: Jason Ansel Co-authored-by: Qingyi Liu Co-authored-by: ben-zhang-609 <110140741+ben-zhang-609@users.noreply.github.com> Co-authored-by: Chenggang Zhao Co-authored-by: ben-zhang-609 Co-authored-by: dongdongl --- .clang-format | 1 + .github/CODEOWNERS | 57 + .github/workflows/documentation.yml | 55 - .github/workflows/integration-tests.yml | 90 +- .gitignore | 16 +- CMakeLists.txt | 238 +- README.md | 2 +- bin/CMakeLists.txt | 60 + bin/FileCheck/CMakeLists.txt | 2 + bin/FileCheck/FileCheck.cpp | 882 + bin/triton-opt.cpp | 42 + bin/triton-translate.cpp | 131 + deps/dlfcn-win32 | 1 - docs/conf.py | 4 +- docs/getting-started/installation.rst | 6 +- .../chapter-2/related-work.rst | 2 +- docs/python-api/triton.language.rst | 4 - include/CMakeLists.txt | 1 + include/triton/Analysis/Alias.h | 80 + include/triton/Analysis/Allocation.h | 192 + include/triton/Analysis/AxisInfo.h | 144 + include/triton/Analysis/Membar.h | 119 + include/triton/Analysis/Utility.h | 82 + include/triton/CMakeLists.txt | 2 + include/triton/Conversion/CMakeLists.txt | 4 + include/triton/Conversion/MLIRTypes.h | 40 + include/triton/Conversion/Passes.h | 17 + include/triton/Conversion/Passes.td | 54 + .../Conversion/TritonGPUToLLVM/PTXAsmFormat.h | 326 + .../TritonGPUToLLVM/TritonGPUToLLVMPass.h | 22 + .../TritonToTritonGPU/TritonToTritonGPUPass.h | 25 + include/triton/Dialect/CMakeLists.txt | 2 + include/triton/Dialect/Triton/CMakeLists.txt | 2 + .../triton/Dialect/Triton/IR/CMakeLists.txt | 19 + include/triton/Dialect/Triton/IR/Dialect.h | 48 + include/triton/Dialect/Triton/IR/Interfaces.h | 9 + include/triton/Dialect/Triton/IR/Traits.h | 60 + .../Dialect/Triton/IR/TritonAttrDefs.td | 68 + .../triton/Dialect/Triton/IR/TritonDialect.td | 46 + .../Dialect/Triton/IR/TritonInterfaces.td | 11 + include/triton/Dialect/Triton/IR/TritonOps.td | 423 + .../triton/Dialect/Triton/IR/TritonTypes.td | 71 + include/triton/Dialect/Triton/IR/Types.h | 10 + .../Dialect/Triton/Transforms/CMakeLists.txt | 3 + .../triton/Dialect/Triton/Transforms/Passes.h | 18 + .../Dialect/Triton/Transforms/Passes.td | 23 + .../triton/Dialect/TritonGPU/CMakeLists.txt | 2 + .../Dialect/TritonGPU/IR/CMakeLists.txt | 12 + include/triton/Dialect/TritonGPU/IR/Dialect.h | 46 + include/triton/Dialect/TritonGPU/IR/Traits.h | 31 + .../Dialect/TritonGPU/IR/TritonGPUAttrDefs.td | 481 + .../Dialect/TritonGPU/IR/TritonGPUDialect.td | 36 + .../Dialect/TritonGPU/IR/TritonGPUOps.td | 198 + .../TritonGPU/Transforms/CMakeLists.txt | 3 + .../Dialect/TritonGPU/Transforms/Passes.h | 25 + .../Dialect/TritonGPU/Transforms/Passes.td | 87 + .../Transforms/TritonGPUConversion.h | 33 + 
.../triton/Target/LLVMIR/LLVMIRTranslation.h | 39 + include/triton/Target/PTX/PTXTranslation.h | 17 + .../sys/getenv.hpp => Tools/Sys/GetEnv.hpp} | 34 +- include/triton/codegen/analysis/align.h | 87 - include/triton/codegen/analysis/allocation.h | 47 - include/triton/codegen/analysis/axes.h | 53 - include/triton/codegen/analysis/layout.h | 370 - include/triton/codegen/analysis/liveness.h | 69 - include/triton/codegen/analysis/swizzle.h | 43 - include/triton/codegen/extern_lib.h | 90 - include/triton/codegen/pass.h | 41 - include/triton/codegen/selection/generator.h | 300 - include/triton/codegen/target.h | 105 - include/triton/codegen/transform/coalesce.h | 49 - include/triton/codegen/transform/cts.h | 44 - include/triton/codegen/transform/dce.h | 24 - .../triton/codegen/transform/disassociate.h | 22 - include/triton/codegen/transform/inline.h | 31 - include/triton/codegen/transform/membar.h | 72 - include/triton/codegen/transform/peephole.h | 56 - include/triton/codegen/transform/pipeline.h | 30 - include/triton/codegen/transform/prefetch.h | 27 - include/triton/codegen/transform/reorder.h | 26 - include/triton/driver/dispatch.h | 318 - include/triton/driver/error.h | 220 - include/triton/driver/llvm.h | 20 - include/triton/external/CUDA/cuda.h | 18948 ---------------- include/triton/external/CUDA/nvml.h | 6281 ----- include/triton/external/half.hpp | 3067 --- include/triton/external/hip.h | 288 - include/triton/ir/basic_block.h | 92 - include/triton/ir/builder.h | 212 - include/triton/ir/constant.h | 113 - include/triton/ir/context.h | 29 - include/triton/ir/context_impl.h | 47 - include/triton/ir/enums.h | 187 - include/triton/ir/function.h | 145 - include/triton/ir/instructions.h | 1147 - include/triton/ir/metadata.h | 34 - include/triton/ir/module.h | 129 - include/triton/ir/print.h | 22 - include/triton/ir/type.h | 252 - include/triton/ir/utils.h | 31 - include/triton/ir/value.h | 95 - include/triton/ir/visitor.h | 191 - include/triton/tools/bench.hpp | 54 - include/triton/tools/graph.h | 70 - include/triton/tools/sha1.hpp | 186 - include/triton/tools/sys/exec.hpp | 46 - include/triton/tools/sys/mkdir.hpp | 76 - include/triton/tools/thread_pool.h | 90 - lib/Analysis/Alias.cpp | 67 + lib/Analysis/Allocation.cpp | 476 + lib/Analysis/AxisInfo.cpp | 321 + lib/Analysis/CMakeLists.txt | 10 + lib/Analysis/Membar.cpp | 137 + lib/Analysis/Utility.cpp | 151 + lib/CMakeLists.txt | 5 + lib/Conversion/CMakeLists.txt | 2 + lib/Conversion/TritonGPUToLLVM/CMakeLists.txt | 31 + .../TritonGPUToLLVM/ConvertLayoutOpToLLVM.cpp | 686 + .../TritonGPUToLLVM/ConvertLayoutOpToLLVM.h | 24 + lib/Conversion/TritonGPUToLLVM/DotOpHelpers.h | 1782 ++ .../TritonGPUToLLVM/DotOpToLLVM.cpp | 311 + lib/Conversion/TritonGPUToLLVM/DotOpToLLVM.h | 15 + .../TritonGPUToLLVM/ElementwiseOpToLLVM.cpp | 865 + .../TritonGPUToLLVM/ElementwiseOpToLLVM.h | 16 + .../TritonGPUToLLVM/LoadStoreOpToLLVM.cpp | 884 + .../TritonGPUToLLVM/LoadStoreOpToLLVM.h | 16 + .../TritonGPUToLLVM/PTXAsmFormat.cpp | 217 + .../TritonGPUToLLVM/ReduceOpToLLVM.cpp | 488 + .../TritonGPUToLLVM/ReduceOpToLLVM.h | 15 + .../TritonGPUToLLVM/TritonGPUToLLVM.cpp | 521 + .../TritonGPUToLLVM/TritonGPUToLLVM.h | 15 + .../TritonGPUToLLVM/TritonGPUToLLVMBase.h | 550 + .../TritonGPUToLLVM/TritonGPUToLLVMPass.cpp | 406 + .../TritonGPUToLLVM/TypeConverter.h | 150 + lib/Conversion/TritonGPUToLLVM/Utility.h | 369 + .../TritonGPUToLLVM/ViewOpToLLVM.cpp | 229 + lib/Conversion/TritonGPUToLLVM/ViewOpToLLVM.h | 15 + .../TritonToTritonGPU/CMakeLists.txt | 19 + 
.../TritonToTritonGPUPass.cpp | 647 + lib/Dialect/CMakeLists.txt | 2 + lib/Dialect/Triton/CMakeLists.txt | 2 + lib/Dialect/Triton/IR/CMakeLists.txt | 20 + lib/Dialect/Triton/IR/Dialect.cpp | 51 + lib/Dialect/Triton/IR/Interfaces.cpp | 0 lib/Dialect/Triton/IR/Ops.cpp | 346 + lib/Dialect/Triton/IR/Traits.cpp | 71 + lib/Dialect/Triton/IR/Types.cpp | 39 + lib/Dialect/Triton/Transforms/CMakeLists.txt | 11 + lib/Dialect/Triton/Transforms/Combine.cpp | 209 + lib/Dialect/Triton/Transforms/Combine.td | 48 + lib/Dialect/TritonGPU/CMakeLists.txt | 2 + lib/Dialect/TritonGPU/IR/CMakeLists.txt | 11 + lib/Dialect/TritonGPU/IR/Dialect.cpp | 783 + lib/Dialect/TritonGPU/IR/Traits.cpp | 14 + .../TritonGPU/Transforms/CMakeLists.txt | 21 + .../Transforms/CanonicalizeLoops.cpp | 55 + lib/Dialect/TritonGPU/Transforms/Coalesce.cpp | 139 + lib/Dialect/TritonGPU/Transforms/Combine.cpp | 1243 + lib/Dialect/TritonGPU/Transforms/Combine.td | 7 + lib/Dialect/TritonGPU/Transforms/Pipeline.cpp | 656 + lib/Dialect/TritonGPU/Transforms/Prefetch.cpp | 313 + .../Transforms/TritonGPUConversion.cpp | 103 + lib/Target/CMakeLists.txt | 2 + lib/Target/LLVMIR/CMakeLists.txt | 12 + lib/Target/LLVMIR/LLVMIRTranslation.cpp | 237 + lib/Target/PTX/CMakeLists.txt | 9 + lib/Target/PTX/PTXTranslation.cpp | 144 + lib/codegen/analysis/align.cc | 634 - lib/codegen/analysis/allocation.cc | 103 - lib/codegen/analysis/axes.cc | 174 - lib/codegen/analysis/layout.cc | 722 - lib/codegen/analysis/liveness.cc | 124 - lib/codegen/analysis/swizzle.cc | 64 - lib/codegen/extern_lib.cc | 63 - lib/codegen/pass.cc | 170 - lib/codegen/selection/generator.cc | 4157 ---- lib/codegen/target.cc | 173 - lib/codegen/transform/coalesce.cc | 121 - lib/codegen/transform/cts.cc | 118 - lib/codegen/transform/dce.cc | 79 - lib/codegen/transform/disassociate.cc | 62 - lib/codegen/transform/inline.cc | 147 - lib/codegen/transform/membar.cc | 254 - lib/codegen/transform/peephole.cc | 331 - lib/codegen/transform/pipeline.cc | 331 - lib/codegen/transform/prefetch.cc | 133 - lib/codegen/transform/reorder.cc | 51 - lib/driver/dispatch.cc | 302 - lib/driver/error.cc | 166 - lib/driver/llvm.cc | 376 - lib/ir/basic_block.cc | 91 - lib/ir/builder.cc | 491 - lib/ir/constant.cc | 120 - lib/ir/context.cc | 40 - lib/ir/function.cc | 66 - lib/ir/instructions.cc | 1059 - lib/ir/metadata.cc | 14 - lib/ir/module.cc | 27 - lib/ir/print.cc | 450 - lib/ir/type.cc | 252 - lib/ir/utils.cc | 77 - lib/ir/value.cc | 82 - python/bench/README.md | 5 - python/bench/bench_blocksparse.py | 92 - python/bench/bench_cross_entropy.py | 41 - python/bench/bench_matmul.py | 67 - python/bench/requirements-bench.txt | 2 - python/bench/run.py | 44 - python/examples/copy_strided.py | 19 + python/examples/empty.py | 13 + python/setup.py | 86 +- python/src/cutlass.cc | 202 - python/src/functions.h | 696 - python/src/main.cc | 4 - python/src/superblock.cc | 119 - python/src/triton.cc | 2322 +- python/test/unit/language/printf_helper.py | 56 + python/test/unit/language/test_core.py | 334 +- python/test/unit/language/test_dequantize.py | 261 - python/test/unit/language/test_printf.py | 22 + .../test/unit/operators/test_blocksparse.py | 12 +- .../test/unit/operators/test_cross_entropy.py | 7 +- python/test/unit/operators/test_matmul.py | 7 +- python/triton/__init__.py | 45 +- python/triton/compiler.py | 1309 +- python/triton/impl/__init__.py | 18 + python/triton/impl/base.py | 36 + python/triton/language/__init__.py | 179 +- python/triton/language/core.py | 389 +- python/triton/language/extern.py | 31 +- 
python/triton/language/libdevice.10.bc | Bin 469572 -> 473728 bytes python/triton/language/libdevice.py | 1049 +- python/triton/language/random.py | 8 +- python/triton/language/semantic.py | 350 +- python/triton/ops/__init__.py | 11 +- python/triton/ops/blocksparse/__init__.py | 6 +- python/triton/ops/blocksparse/softmax.py | 8 +- python/triton/ops/matmul_perf_model.py | 22 +- python/triton/runtime/__init__.py | 14 +- python/triton/runtime/autotuner.py | 15 +- python/triton/runtime/jit.py | 68 +- python/triton/testing.py | 23 +- python/triton/tools/aot.py | 61 + python/triton/tools/build_extern.py | 14 +- python/triton/tools/compare_asm.py | 76 - python/tutorials/02-fused-softmax.py | 4 +- python/tutorials/03-matrix-multiplication.py | 39 +- python/tutorials/05-layer-norm.py | 265 +- python/tutorials/06-fused-attention.py | 49 +- python/tutorials/07-libdevice-function.py | 74 - test/Analysis/test-alias.mlir | 205 + test/Analysis/test-alignment.mlir | 141 + test/Analysis/test-allocation.mlir | 319 + test/Analysis/test-membar.mlir | 318 + test/CMakeLists.txt | 26 + test/Conversion/triton_ops.mlir | 132 + test/Conversion/triton_to_tritongpu.mlir | 53 + test/Conversion/tritongpu_to_llvm.mlir | 1016 + test/Target/tritongpu_to_llvmir.mlir | 16 + test/Target/tritongpu_to_ptx.mlir | 14 + test/Triton/combine.mlir | 146 + test/Triton/vecadd.mlir | 130 + test/TritonGPU/coalesce.mlir | 53 + test/TritonGPU/combine.mlir | 186 + test/TritonGPU/loop-pipeline.mlir | 183 + test/TritonGPU/matmul.mlir | 106 + test/TritonGPU/prefetch.mlir | 65 + test/lib/Analysis/CMakeLists.txt | 9 + test/lib/Analysis/TestAlias.cpp | 92 + test/lib/Analysis/TestAllocation.cpp | 54 + test/lib/Analysis/TestAxisInfo.cpp | 69 + test/lib/Analysis/TestMembar.cpp | 52 + test/lib/CMakeLists.txt | 1 + test/lit.cfg.py | 67 + test/lit.site.cfg.py.in | 23 + unittest/Analysis/CMakeLists.txt | 5 + unittest/Analysis/UtilityTest.cpp | 29 + unittest/CMakeLists.txt | 29 + unittest/Conversion/CMakeLists.txt | 1 + .../Conversion/TritonGPUToLLVM/CMakeLists.txt | 5 + .../TritonGPUToLLVM/PTXAsmFormatTest.cpp | 147 + unittest/Dialect/CMakeLists.txt | 1 + unittest/Dialect/TritonGPU/CMakeLists.txt | 6 + unittest/Dialect/TritonGPU/SwizzleTest.cpp | 53 + unittest/googletest.cmake | 23 + 285 files changed, 26333 insertions(+), 50164 deletions(-) create mode 100644 .clang-format create mode 100644 .github/CODEOWNERS delete mode 100644 .github/workflows/documentation.yml create mode 100644 bin/CMakeLists.txt create mode 100644 bin/FileCheck/CMakeLists.txt create mode 100644 bin/FileCheck/FileCheck.cpp create mode 100644 bin/triton-opt.cpp create mode 100644 bin/triton-translate.cpp delete mode 160000 deps/dlfcn-win32 create mode 100644 include/CMakeLists.txt create mode 100644 include/triton/Analysis/Alias.h create mode 100644 include/triton/Analysis/Allocation.h create mode 100644 include/triton/Analysis/AxisInfo.h create mode 100644 include/triton/Analysis/Membar.h create mode 100644 include/triton/Analysis/Utility.h create mode 100644 include/triton/CMakeLists.txt create mode 100644 include/triton/Conversion/CMakeLists.txt create mode 100644 include/triton/Conversion/MLIRTypes.h create mode 100644 include/triton/Conversion/Passes.h create mode 100644 include/triton/Conversion/Passes.td create mode 100644 include/triton/Conversion/TritonGPUToLLVM/PTXAsmFormat.h create mode 100644 include/triton/Conversion/TritonGPUToLLVM/TritonGPUToLLVMPass.h create mode 100644 include/triton/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.h create mode 100644 
include/triton/Dialect/CMakeLists.txt create mode 100644 include/triton/Dialect/Triton/CMakeLists.txt create mode 100644 include/triton/Dialect/Triton/IR/CMakeLists.txt create mode 100644 include/triton/Dialect/Triton/IR/Dialect.h create mode 100644 include/triton/Dialect/Triton/IR/Interfaces.h create mode 100644 include/triton/Dialect/Triton/IR/Traits.h create mode 100644 include/triton/Dialect/Triton/IR/TritonAttrDefs.td create mode 100644 include/triton/Dialect/Triton/IR/TritonDialect.td create mode 100644 include/triton/Dialect/Triton/IR/TritonInterfaces.td create mode 100644 include/triton/Dialect/Triton/IR/TritonOps.td create mode 100644 include/triton/Dialect/Triton/IR/TritonTypes.td create mode 100644 include/triton/Dialect/Triton/IR/Types.h create mode 100644 include/triton/Dialect/Triton/Transforms/CMakeLists.txt create mode 100644 include/triton/Dialect/Triton/Transforms/Passes.h create mode 100644 include/triton/Dialect/Triton/Transforms/Passes.td create mode 100644 include/triton/Dialect/TritonGPU/CMakeLists.txt create mode 100644 include/triton/Dialect/TritonGPU/IR/CMakeLists.txt create mode 100644 include/triton/Dialect/TritonGPU/IR/Dialect.h create mode 100644 include/triton/Dialect/TritonGPU/IR/Traits.h create mode 100644 include/triton/Dialect/TritonGPU/IR/TritonGPUAttrDefs.td create mode 100644 include/triton/Dialect/TritonGPU/IR/TritonGPUDialect.td create mode 100644 include/triton/Dialect/TritonGPU/IR/TritonGPUOps.td create mode 100644 include/triton/Dialect/TritonGPU/Transforms/CMakeLists.txt create mode 100644 include/triton/Dialect/TritonGPU/Transforms/Passes.h create mode 100644 include/triton/Dialect/TritonGPU/Transforms/Passes.td create mode 100644 include/triton/Dialect/TritonGPU/Transforms/TritonGPUConversion.h create mode 100644 include/triton/Target/LLVMIR/LLVMIRTranslation.h create mode 100644 include/triton/Target/PTX/PTXTranslation.h rename include/triton/{tools/sys/getenv.hpp => Tools/Sys/GetEnv.hpp} (63%) mode change 100755 => 100644 delete mode 100644 include/triton/codegen/analysis/align.h delete mode 100644 include/triton/codegen/analysis/allocation.h delete mode 100644 include/triton/codegen/analysis/axes.h delete mode 100644 include/triton/codegen/analysis/layout.h delete mode 100644 include/triton/codegen/analysis/liveness.h delete mode 100644 include/triton/codegen/analysis/swizzle.h delete mode 100644 include/triton/codegen/extern_lib.h delete mode 100644 include/triton/codegen/pass.h delete mode 100644 include/triton/codegen/selection/generator.h delete mode 100644 include/triton/codegen/target.h delete mode 100644 include/triton/codegen/transform/coalesce.h delete mode 100644 include/triton/codegen/transform/cts.h delete mode 100644 include/triton/codegen/transform/dce.h delete mode 100644 include/triton/codegen/transform/disassociate.h delete mode 100644 include/triton/codegen/transform/inline.h delete mode 100644 include/triton/codegen/transform/membar.h delete mode 100644 include/triton/codegen/transform/peephole.h delete mode 100644 include/triton/codegen/transform/pipeline.h delete mode 100644 include/triton/codegen/transform/prefetch.h delete mode 100644 include/triton/codegen/transform/reorder.h delete mode 100755 include/triton/driver/dispatch.h delete mode 100755 include/triton/driver/error.h delete mode 100644 include/triton/driver/llvm.h delete mode 100644 include/triton/external/CUDA/cuda.h delete mode 100755 include/triton/external/CUDA/nvml.h delete mode 100644 include/triton/external/half.hpp delete mode 100644 
include/triton/external/hip.h delete mode 100644 include/triton/ir/basic_block.h delete mode 100644 include/triton/ir/builder.h delete mode 100644 include/triton/ir/constant.h delete mode 100644 include/triton/ir/context.h delete mode 100644 include/triton/ir/context_impl.h delete mode 100644 include/triton/ir/enums.h delete mode 100644 include/triton/ir/function.h delete mode 100644 include/triton/ir/instructions.h delete mode 100644 include/triton/ir/metadata.h delete mode 100644 include/triton/ir/module.h delete mode 100644 include/triton/ir/print.h delete mode 100644 include/triton/ir/type.h delete mode 100644 include/triton/ir/utils.h delete mode 100644 include/triton/ir/value.h delete mode 100644 include/triton/ir/visitor.h delete mode 100644 include/triton/tools/bench.hpp delete mode 100644 include/triton/tools/graph.h delete mode 100644 include/triton/tools/sha1.hpp delete mode 100644 include/triton/tools/sys/exec.hpp delete mode 100755 include/triton/tools/sys/mkdir.hpp delete mode 100644 include/triton/tools/thread_pool.h create mode 100644 lib/Analysis/Alias.cpp create mode 100644 lib/Analysis/Allocation.cpp create mode 100644 lib/Analysis/AxisInfo.cpp create mode 100644 lib/Analysis/CMakeLists.txt create mode 100644 lib/Analysis/Membar.cpp create mode 100644 lib/Analysis/Utility.cpp create mode 100644 lib/CMakeLists.txt create mode 100644 lib/Conversion/CMakeLists.txt create mode 100644 lib/Conversion/TritonGPUToLLVM/CMakeLists.txt create mode 100644 lib/Conversion/TritonGPUToLLVM/ConvertLayoutOpToLLVM.cpp create mode 100644 lib/Conversion/TritonGPUToLLVM/ConvertLayoutOpToLLVM.h create mode 100644 lib/Conversion/TritonGPUToLLVM/DotOpHelpers.h create mode 100644 lib/Conversion/TritonGPUToLLVM/DotOpToLLVM.cpp create mode 100644 lib/Conversion/TritonGPUToLLVM/DotOpToLLVM.h create mode 100644 lib/Conversion/TritonGPUToLLVM/ElementwiseOpToLLVM.cpp create mode 100644 lib/Conversion/TritonGPUToLLVM/ElementwiseOpToLLVM.h create mode 100644 lib/Conversion/TritonGPUToLLVM/LoadStoreOpToLLVM.cpp create mode 100644 lib/Conversion/TritonGPUToLLVM/LoadStoreOpToLLVM.h create mode 100644 lib/Conversion/TritonGPUToLLVM/PTXAsmFormat.cpp create mode 100644 lib/Conversion/TritonGPUToLLVM/ReduceOpToLLVM.cpp create mode 100644 lib/Conversion/TritonGPUToLLVM/ReduceOpToLLVM.h create mode 100644 lib/Conversion/TritonGPUToLLVM/TritonGPUToLLVM.cpp create mode 100644 lib/Conversion/TritonGPUToLLVM/TritonGPUToLLVM.h create mode 100644 lib/Conversion/TritonGPUToLLVM/TritonGPUToLLVMBase.h create mode 100644 lib/Conversion/TritonGPUToLLVM/TritonGPUToLLVMPass.cpp create mode 100644 lib/Conversion/TritonGPUToLLVM/TypeConverter.h create mode 100644 lib/Conversion/TritonGPUToLLVM/Utility.h create mode 100644 lib/Conversion/TritonGPUToLLVM/ViewOpToLLVM.cpp create mode 100644 lib/Conversion/TritonGPUToLLVM/ViewOpToLLVM.h create mode 100644 lib/Conversion/TritonToTritonGPU/CMakeLists.txt create mode 100644 lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp create mode 100644 lib/Dialect/CMakeLists.txt create mode 100644 lib/Dialect/Triton/CMakeLists.txt create mode 100644 lib/Dialect/Triton/IR/CMakeLists.txt create mode 100644 lib/Dialect/Triton/IR/Dialect.cpp create mode 100644 lib/Dialect/Triton/IR/Interfaces.cpp create mode 100644 lib/Dialect/Triton/IR/Ops.cpp create mode 100644 lib/Dialect/Triton/IR/Traits.cpp create mode 100644 lib/Dialect/Triton/IR/Types.cpp create mode 100644 lib/Dialect/Triton/Transforms/CMakeLists.txt create mode 100644 lib/Dialect/Triton/Transforms/Combine.cpp create mode 100644 
lib/Dialect/Triton/Transforms/Combine.td create mode 100644 lib/Dialect/TritonGPU/CMakeLists.txt create mode 100644 lib/Dialect/TritonGPU/IR/CMakeLists.txt create mode 100644 lib/Dialect/TritonGPU/IR/Dialect.cpp create mode 100644 lib/Dialect/TritonGPU/IR/Traits.cpp create mode 100644 lib/Dialect/TritonGPU/Transforms/CMakeLists.txt create mode 100644 lib/Dialect/TritonGPU/Transforms/CanonicalizeLoops.cpp create mode 100644 lib/Dialect/TritonGPU/Transforms/Coalesce.cpp create mode 100644 lib/Dialect/TritonGPU/Transforms/Combine.cpp create mode 100644 lib/Dialect/TritonGPU/Transforms/Combine.td create mode 100644 lib/Dialect/TritonGPU/Transforms/Pipeline.cpp create mode 100644 lib/Dialect/TritonGPU/Transforms/Prefetch.cpp create mode 100644 lib/Dialect/TritonGPU/Transforms/TritonGPUConversion.cpp create mode 100644 lib/Target/CMakeLists.txt create mode 100644 lib/Target/LLVMIR/CMakeLists.txt create mode 100644 lib/Target/LLVMIR/LLVMIRTranslation.cpp create mode 100644 lib/Target/PTX/CMakeLists.txt create mode 100644 lib/Target/PTX/PTXTranslation.cpp delete mode 100644 lib/codegen/analysis/align.cc delete mode 100644 lib/codegen/analysis/allocation.cc delete mode 100644 lib/codegen/analysis/axes.cc delete mode 100644 lib/codegen/analysis/layout.cc delete mode 100644 lib/codegen/analysis/liveness.cc delete mode 100644 lib/codegen/analysis/swizzle.cc delete mode 100644 lib/codegen/extern_lib.cc delete mode 100644 lib/codegen/pass.cc delete mode 100644 lib/codegen/selection/generator.cc delete mode 100644 lib/codegen/target.cc delete mode 100644 lib/codegen/transform/coalesce.cc delete mode 100644 lib/codegen/transform/cts.cc delete mode 100644 lib/codegen/transform/dce.cc delete mode 100644 lib/codegen/transform/disassociate.cc delete mode 100644 lib/codegen/transform/inline.cc delete mode 100644 lib/codegen/transform/membar.cc delete mode 100644 lib/codegen/transform/peephole.cc delete mode 100644 lib/codegen/transform/pipeline.cc delete mode 100644 lib/codegen/transform/prefetch.cc delete mode 100644 lib/codegen/transform/reorder.cc delete mode 100755 lib/driver/dispatch.cc delete mode 100755 lib/driver/error.cc delete mode 100644 lib/driver/llvm.cc delete mode 100644 lib/ir/basic_block.cc delete mode 100644 lib/ir/builder.cc delete mode 100644 lib/ir/constant.cc delete mode 100644 lib/ir/context.cc delete mode 100644 lib/ir/function.cc delete mode 100644 lib/ir/instructions.cc delete mode 100644 lib/ir/metadata.cc delete mode 100644 lib/ir/module.cc delete mode 100644 lib/ir/print.cc delete mode 100644 lib/ir/type.cc delete mode 100644 lib/ir/utils.cc delete mode 100644 lib/ir/value.cc delete mode 100644 python/bench/README.md delete mode 100644 python/bench/bench_blocksparse.py delete mode 100644 python/bench/bench_cross_entropy.py delete mode 100644 python/bench/bench_matmul.py delete mode 100644 python/bench/requirements-bench.txt delete mode 100644 python/bench/run.py create mode 100644 python/examples/copy_strided.py create mode 100644 python/examples/empty.py delete mode 100644 python/src/cutlass.cc delete mode 100644 python/src/functions.h delete mode 100644 python/src/superblock.cc create mode 100644 python/test/unit/language/printf_helper.py delete mode 100644 python/test/unit/language/test_dequantize.py create mode 100644 python/test/unit/language/test_printf.py create mode 100644 python/triton/impl/__init__.py create mode 100644 python/triton/impl/base.py mode change 100644 => 100755 python/triton/language/libdevice.10.bc create mode 100644 python/triton/tools/aot.py delete mode 
100644 python/triton/tools/compare_asm.py delete mode 100644 python/tutorials/07-libdevice-function.py create mode 100644 test/Analysis/test-alias.mlir create mode 100644 test/Analysis/test-alignment.mlir create mode 100644 test/Analysis/test-allocation.mlir create mode 100644 test/Analysis/test-membar.mlir create mode 100644 test/CMakeLists.txt create mode 100644 test/Conversion/triton_ops.mlir create mode 100644 test/Conversion/triton_to_tritongpu.mlir create mode 100644 test/Conversion/tritongpu_to_llvm.mlir create mode 100644 test/Target/tritongpu_to_llvmir.mlir create mode 100644 test/Target/tritongpu_to_ptx.mlir create mode 100644 test/Triton/combine.mlir create mode 100644 test/Triton/vecadd.mlir create mode 100644 test/TritonGPU/coalesce.mlir create mode 100644 test/TritonGPU/combine.mlir create mode 100644 test/TritonGPU/loop-pipeline.mlir create mode 100644 test/TritonGPU/matmul.mlir create mode 100644 test/TritonGPU/prefetch.mlir create mode 100644 test/lib/Analysis/CMakeLists.txt create mode 100644 test/lib/Analysis/TestAlias.cpp create mode 100644 test/lib/Analysis/TestAllocation.cpp create mode 100644 test/lib/Analysis/TestAxisInfo.cpp create mode 100644 test/lib/Analysis/TestMembar.cpp create mode 100644 test/lib/CMakeLists.txt create mode 100644 test/lit.cfg.py create mode 100644 test/lit.site.cfg.py.in create mode 100644 unittest/Analysis/CMakeLists.txt create mode 100644 unittest/Analysis/UtilityTest.cpp create mode 100644 unittest/CMakeLists.txt create mode 100644 unittest/Conversion/CMakeLists.txt create mode 100644 unittest/Conversion/TritonGPUToLLVM/CMakeLists.txt create mode 100644 unittest/Conversion/TritonGPUToLLVM/PTXAsmFormatTest.cpp create mode 100644 unittest/Dialect/CMakeLists.txt create mode 100644 unittest/Dialect/TritonGPU/CMakeLists.txt create mode 100644 unittest/Dialect/TritonGPU/SwizzleTest.cpp create mode 100644 unittest/googletest.cmake diff --git a/.clang-format b/.clang-format new file mode 100644 index 000000000000..9b3aa8b7213b --- /dev/null +++ b/.clang-format @@ -0,0 +1 @@ +BasedOnStyle: LLVM diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 000000000000..9621a580a49a --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,57 @@ +# These owners will be the default owners for everything in +# the repo. Unless a later match takes precedence, +# @global-owner1 and @global-owner2 will be requested for +# review when someone opens a pull request. 
+* @ptillet + +# -------- +# Analyses +# -------- +# Alias analysis +include/triton/Analysis/Alias.h @Jokeren +lib/Analysis/Alias.cpp @Jokeren +# Allocation analysis +include/triton/Analysis/Allocation.h @Jokeren +lib/Analysis/Allocation.cpp @Jokeren +# Membar analysis +include/triton/Analysis/Membar.h @Jokeren +lib/Analysis/Membar.cpp @Jokeren +# AxisInfo analysis +include/triton/Analysis/AxisInfo.h @ptillet +lib/Analysis/AxisInfo.cpp @ptillet +# Utilities +include/triton/Analysis/Utility.h @Jokeren +lib/Analysis/Utility.cpp @Jokeren + +# ---------- +# Dialects +# ---------- +# Pipeline pass +lib/Dialect/TritonGPU/Transforms/Pipeline.cpp @daadaada +# Prefetch pass +lib/Dialect/TritonGPU/Transforms/Prefetch.cpp @daadaada +# Coalesce pass +lib/Dialect/TritonGPU/Transforms/Coalesce.cpp @ptillet +# Layout simplification pass +lib/Dialect/TritonGPU/Transforms/Combine.cpp @ptillet + +# ----------- +# Conversions +# ----------- +# TritonGPUToLLVM +include/triton/Conversion/TritonGPUToLLVM/ @goostavz @Superjomn +lib/Conversions/TritonGPUToLLVM @goostavz @Superjomn +# TritonToTritonGPU +include/triton/Conversion/TritonToTritonGPU/ @daadaada +lib/Dialect/TritonGPU/Transforms/TritonGPUConversion.cpp @daadaada + + +# ------- +# Targets +# ------- +# LLVMIR +include/triton/Target/LLVMIR/ @goostavz @Superjomn +lib/Target/LLVMIR @goostavz @Superjomn +# PTX +include/triton/Target/PTX/ @goostavz @Superjomn +lib/Target/PTX @goostavz @Superjomn diff --git a/.github/workflows/documentation.yml b/.github/workflows/documentation.yml deleted file mode 100644 index 28cb20e37a1f..000000000000 --- a/.github/workflows/documentation.yml +++ /dev/null @@ -1,55 +0,0 @@ -name: Documentation -on: - workflow_dispatch: - schedule: - - cron: "0 0 * * *" - -jobs: - - Build-Documentation: - - runs-on: [self-hosted, V100] - - steps: - - - - name: Checkout gh-pages - uses: actions/checkout@v1 - with: - ref: 'gh-pages' - - - name: Clear docs - run: | - rm -r /tmp/triton-docs - continue-on-error: true - - - name: Checkout branch - uses: actions/checkout@v1 - - - name: Build docs - run: | - git fetch origin master:master - cd docs - sphinx-multiversion . _build/html/ - - - name: Publish docs - run: | - git branch - # update docs - mkdir /tmp/triton-docs; - mv docs/_build/html/* /tmp/triton-docs/ - git checkout gh-pages - cp -r CNAME /tmp/triton-docs/ - cp -r index.html /tmp/triton-docs/ - cp -r .nojekyll /tmp/triton-docs/ - rm -r * - cp -r /tmp/triton-docs/* . - # ln -s master/index.html . - # mv master docs - git add . 
- git commit -am "[GH-PAGES] Updated website" - # publish docs - eval `ssh-agent -s` - DISPLAY=:0 SSH_ASKPASS=~/.ssh/give_pass.sh ssh-add ${{ secrets.SSH_KEY }} <<< ${{ secrets.SSH_PASS }} - git remote set-url origin git@github.com:openai/triton.git - git push diff --git a/.github/workflows/integration-tests.yml b/.github/workflows/integration-tests.yml index 8d9ec237f0d4..617fa13ac104 100644 --- a/.github/workflows/integration-tests.yml +++ b/.github/workflows/integration-tests.yml @@ -5,50 +5,88 @@ on: pull_request: branches: - master - + - triton-mlir jobs: + Runner-Preparation: + runs-on: ubuntu-latest + outputs: + matrix: ${{ steps.set-matrix.outputs.matrix }} + steps: + - name: Prepare runner matrix + id: set-matrix + run: | + if [ x"${{ github.repository }}" == x"openai/triton" ]; then + echo '::set-output name=matrix::[["self-hosted", "A10"], ["self-hosted", "V100"], "macos-10.15"]' + else + echo '::set-output name=matrix::["ubuntu-latest", "macos-10.15"]' + fi Integration-Tests: - - runs-on: [self-hosted, V100] + needs: Runner-Preparation - steps: + runs-on: ${{ matrix.runner }} + + strategy: + matrix: + runner: ${{fromJson(needs.Runner-Preparation.outputs.matrix)}} + steps: - name: Checkout uses: actions/checkout@v2 - name: Clear cache run: | - rm -r ~/.triton/ - continue-on-error: true + rm -rf ~/.triton/cache/ - - name: Install Triton + - name: Check imports + if: ${{ matrix.runner != 'macos-10.15' }} run: | - alias python='python3' - cd python - pip3 install -e '.[tests]' + pip install isort + isort -c ./python || ( echo '::error title=Imports not sorted::Please run \"isort ./python\"' ; exit 1 ) - - name: Check imports - run: "isort -c ./python || ( echo '::error title=Imports not sorted::Please run \"isort ./python\"' ; exit 1 )" + - name: Check python style + if: ${{ matrix.runner != 'macos-10.15' }} + run: | + pip install autopep8 + autopep8 -a -r -d --exit-code ./python || ( echo '::error title=Style issues::Please run \"autopep8 -a -r -i ./python\"' ; exit 1 ) - - name: Check style - run: "autopep8 -a -r -d --exit-code ./python || ( echo '::error title=Style issues::Please run \"autopep8 -a -r -i ./python\"' ; exit 1 )" + - name: Check cpp style + if: ${{ matrix.runner != 'macos-10.15' }} + run: | + pip install clang-format + find . -regex '.*\.\(cpp\|hpp\|h\|cc\)' -not -path "./python/build/*" -not -path "./include/triton/external/*" -print0 | xargs -0 -n1 clang-format -style=file --dry-run -Werror -i || + (echo '::error title=Style issues:: Please run `find . -regex ".*\.\(cpp\|hpp\|h\|cc\)" -not -path "./python/build/*" -not -path "./include/triton/external/*" -print0 | xargs -0 -n1 clang-format -style=file -i`' ; exit 1) - name: Flake8 - run: "flake8 --config ./python/setup.cfg ./python || ( echo '::error::Flake8 failed; see logs for errors.' ; exit 1 )" + if: ${{ matrix.runner != 'macos-10.15' }} + run: | + pip install flake8 + flake8 --config ./python/setup.cfg ./python || ( echo '::error::Flake8 failed; see logs for errors.' ; exit 1 ) + + - name: Install Triton + run: | + cd python + TRITON_USE_ASSERT_ENABLED_LLVM=TRUE pip3 install -e '.[tests]' - - name: Unit tests + - name: Run lit tests run: | - cd python/test/unit - pytest -vs . + cd python + LIT_TEST_DIR="build/$(ls build)/test" + if [ ! -d "$LIT_TEST_DIR" ]; then + echo "Not found `$LIT_TEST_DIR`. Did you change an installation method?" 
; exit -1 + fi + lit -v "$LIT_TEST_DIR" + + - name: Run python tests + if: ${{matrix.runner[0] == 'self-hosted'}} + run: | + cd python/test/unit/ + pytest + - - name: Regression tests + - name: Run CXX unittests run: | - cd python/test/regression - sudo nvidia-smi -i 0 -pm 1 - sudo nvidia-smi -i 0 --lock-gpu-clocks=1350,1350 - sudo nvidia-smi -i 0 --lock-memory-clocks=877,877 - pytest -vs . - sudo nvidia-smi -i 0 -rgc - sudo nvidia-smi -i 0 -rmc + cd python/ + cd "build/$(ls build)" + ctest diff --git a/.gitignore b/.gitignore index 95d59cfed426..30a8cf52f9a7 100644 --- a/.gitignore +++ b/.gitignore @@ -1,12 +1,20 @@ +# Triton builds build/ -__pycache__ -.pytest_cache - +# Triton Python module builds python/build/ python/triton.egg-info/ python/triton/_C/libtriton.pyd python/triton/_C/libtriton.so +# Python caches +__pycache__ +.pytest_cache + +# VS Code project files .vscode -.vs \ No newline at end of file +.vs + +# JetBrains project files +.idea +cmake-build-* diff --git a/CMakeLists.txt b/CMakeLists.txt index deebd160cc2c..926c2ee1dcdb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -3,6 +3,8 @@ include(ExternalProject) set(CMAKE_CXX_STANDARD 17) +set(CMAKE_INCLUDE_CURRENT_DIR ON) + project(triton) include(CTest) if(NOT WIN32) @@ -10,8 +12,16 @@ if(NOT WIN32) endif() # Options -option(BUILD_TUTORIALS "Build C++ Triton tutorials" ON) -option(BUILD_PYTHON_MODULE "Build Python Triton bindings" OFF) +option(TRITON_BUILD_TUTORIALS "Build C++ Triton tutorials" ON) +option(TRITON_BUILD_PYTHON_MODULE "Build Python Triton bindings" OFF) + +# Ensure Python3 vars are set correctly +# used conditionally in this file and by lit tests +find_package(Python3 REQUIRED COMPONENTS Development Interpreter) + +# Customized release build type with assertions: TritonRelBuildWithAsserts +set(CMAKE_C_FLAGS_TRITONRELBUILDWITHASSERTS "-O2 -g") +set(CMAKE_CXX_FLAGS_TRITONRELBUILDWITHASSERTS "-O2 -g") # Default build type if(NOT CMAKE_BUILD_TYPE) @@ -35,13 +45,18 @@ if(WIN32) add_subdirectory(deps/dlfcn-win32/src ${CMAKE_BINARY_DIR}/dlfcn-win32) endif() -set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -D__STDC_FORMAT_MACROS -std=gnu++17") +set(CMAKE_CXX_FLAGS "${CMAKE_C_FLAGS} -D__STDC_FORMAT_MACROS -fPIC -std=gnu++17 -fvisibility=hidden -fvisibility-inlines-hidden") +if(APPLE) + set(CMAKE_OSX_DEPLOYMENT_TARGET 11.6) +endif() + ########## # LLVM ########## -if("${LLVM_LIBRARY_DIR}" STREQUAL "") +if (NOT MLIR_DIR) + if(NOT LLVM_LIBRARY_DIR) if(WIN32) find_package(LLVM 13 REQUIRED COMPONENTS nvptx amdgpu) @@ -60,95 +75,148 @@ if("${LLVM_LIBRARY_DIR}" STREQUAL "") if(APPLE) set(CMAKE_OSX_DEPLOYMENT_TARGET "10.14") endif() -# sometimes we don't want to use llvm-config, since it may have been downloaded for some specific linux distros -else() + # sometimes we don't want to use llvm-config, since it may have been downloaded for some specific linux distros + else() set(LLVM_LDFLAGS "-L${LLVM_LIBRARY_DIR}") set(LLVM_LIBRARIES -libLLVMNVPTXCodeGen.a -libLLVMNVPTXDesc.a -libLLVMNVPTXInfo.a -libLLVMAMDGPUDisassembler.a -libLLVMMCDisassembler.a -libLLVMAMDGPUCodeGen.a -libLLVMMIRParser.a -libLLVMGlobalISel.a -libLLVMSelectionDAG.a -libLLVMipo.a -libLLVMInstrumentation.a -libLLVMVectorize.a -libLLVMLinker.a -libLLVMIRReader.a -libLLVMAsmParser.a -libLLVMFrontendOpenMP.a -libLLVMAsmPrinter.a -libLLVMDebugInfoDWARF.a -libLLVMCodeGen.a -libLLVMTarget.a -libLLVMScalarOpts.a -libLLVMInstCombine.a -libLLVMAggressiveInstCombine.a -libLLVMTransformUtils.a -libLLVMBitWriter.a -libLLVMAnalysis.a -libLLVMProfileData.a -libLLVMObject.a 
-libLLVMTextAPI.a -libLLVMBitReader.a -libLLVMAMDGPUAsmParser.a -libLLVMMCParser.a -libLLVMAMDGPUDesc.a -libLLVMAMDGPUUtils.a -libLLVMMC.a -libLLVMDebugInfoCodeView.a -libLLVMDebugInfoMSF.a -libLLVMCore.a -libLLVMRemarks.a -libLLVMBitstreamReader.a -libLLVMBinaryFormat.a -libLLVMAMDGPUInfo.a -libLLVMSupport.a -libLLVMDemangle.a -libLLVMPasses.a -libLLVMAnalysis.a -libLLVMTransformUtils.a -libLLVMScalarOpts.a -libLLVMTransformUtils.a -libLLVMipo.a -libLLVMObjCARCOpts.a -libLLVMCoroutines.a -libLLVMAnalysis.a -) + libLLVMNVPTXCodeGen.a + libLLVMNVPTXDesc.a + libLLVMNVPTXInfo.a + libLLVMAMDGPUDisassembler.a + libLLVMMCDisassembler.a + libLLVMAMDGPUCodeGen.a + libLLVMMIRParser.a + libLLVMGlobalISel.a + libLLVMSelectionDAG.a + libLLVMipo.a + libLLVMInstrumentation.a + libLLVMVectorize.a + libLLVMLinker.a + libLLVMIRReader.a + libLLVMAsmParser.a + libLLVMFrontendOpenMP.a + libLLVMAsmPrinter.a + libLLVMDebugInfoDWARF.a + libLLVMCodeGen.a + libLLVMTarget.a + libLLVMScalarOpts.a + libLLVMInstCombine.a + libLLVMAggressiveInstCombine.a + libLLVMTransformUtils.a + libLLVMBitWriter.a + libLLVMAnalysis.a + libLLVMProfileData.a + libLLVMObject.a + libLLVMTextAPI.a + libLLVMBitReader.a + libLLVMAMDGPUAsmParser.a + libLLVMMCParser.a + libLLVMAMDGPUDesc.a + libLLVMAMDGPUUtils.a + libLLVMMC.a + libLLVMDebugInfoCodeView.a + libLLVMDebugInfoMSF.a + libLLVMCore.a + libLLVMRemarks.a + libLLVMBitstreamReader.a + libLLVMBinaryFormat.a + libLLVMAMDGPUInfo.a + libLLVMSupport.a + libLLVMDemangle.a + libLLVMPasses.a + libLLVMAnalysis.a + libLLVMTransformUtils.a + libLLVMScalarOpts.a + libLLVMTransformUtils.a + libLLVMipo.a + libLLVMObjCARCOpts.a + libLLVMCoroutines.a + libLLVMAnalysis.a + ) + endif() + set (MLIR_DIR ${LLVM_LIBRARY_DIR}/cmake/mlir) endif() -include_directories("${LLVM_INCLUDE_DIRS}") # Python module -if(BUILD_PYTHON_MODULE) +if(TRITON_BUILD_PYTHON_MODULE) message(STATUS "Adding Python module") - # Build CUTLASS python wrapper if requested set(PYTHON_SRC_PATH ${CMAKE_CURRENT_SOURCE_DIR}/python/src) - set(CUTLASS_INCLUDE_DIR "$ENV{CUTLASS_INCLUDE_DIR}") - set(CUTLASS_LIBRARY_DIR "$ENV{CUTLASS_LIBRARY_DIR}") - if(NOT("${CUTLASS_INCLUDE_DIR}" STREQUAL "") AND NOT("${CUTLASS_LIBRARY_DIR}" STREQUAL "")) - set(CUTLASS_SRC ${PYTHON_SRC_PATH}/cutlass.cc) - add_definitions(-DWITH_CUTLASS_BINDINGS) - set(CUTLASS_LIBRARIES "cutlass.a") + set(PYTHON_SRC ${PYTHON_SRC_PATH}/main.cc ${PYTHON_SRC_PATH}/triton.cc) + include_directories("." ${PYTHON_SRC_PATH}) + if (PYTHON_INCLUDE_DIRS) + include_directories(${PYTHON_INCLUDE_DIRS}) + else() + include_directories(${Python3_INCLUDE_DIRS}) + link_directories(${Python3_LIBRARY_DIRS}) + link_libraries(${Python3_LIBRARIES}) + add_link_options(${Python3_LINK_OPTIONS}) endif() - include_directories("." 
${PYTHON_SRC_PATH} ${PYTHON_INCLUDE_DIRS} ${CUTLASS_INCLUDE_DIR}) - link_directories(${PYTHON_LINK_DIRS} ${CUTLASS_LIBRARY_DIR}) - set(PYTHON_SRC ${PYTHON_SRC_PATH}/main.cc ${PYTHON_SRC_PATH}/triton.cc ${PYTHON_SRC_PATH}/superblock.cc ${CUTLASS_SRC}) endif() -# Triton -file(GLOB_RECURSE LIBTRITON_SRC lib/*.cc) -if (WIN32 AND BUILD_PYTHON_MODULE) - find_package(Python3 REQUIRED COMPONENTS Development) - Python3_add_library(triton SHARED ${LIBTRITON_SRC} ${PYTHON_SRC}) - set_target_properties(triton PROPERTIES SUFFIX ".pyd") - set_target_properties(triton PROPERTIES PREFIX "lib") -else() - add_library(triton SHARED ${LIBTRITON_SRC} ${PYTHON_SRC}) -endif() +# # Triton +# file(GLOB_RECURSE LIBTRITON_SRC lib/*.cc) +# if (WIN32 AND TRITON_BUILD_PYTHON_MODULE) +# Python3_add_library(triton SHARED ${LIBTRITON_SRC} ${PYTHON_SRC}) +# set_target_properties(triton PROPERTIES SUFFIX ".pyd") +# set_target_properties(triton PROPERTIES PREFIX "lib") +# else() +# add_library(triton SHARED ${LIBTRITON_SRC} ${PYTHON_SRC}) +# endif() + + +# MLIR +find_package(MLIR REQUIRED CONFIG PATHS ${MLIR_DIR}) + +list(APPEND CMAKE_MODULE_PATH "${MLIR_CMAKE_DIR}") +list(APPEND CMAKE_MODULE_PATH "${LLVM_CMAKE_DIR}") + +include(TableGen) # required by AddMLIR +include(AddLLVM) +include(AddMLIR) + +# Disable warnings that show up in external code (gtest;pybind11) +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Werror -Wno-covered-switch-default") + +include_directories(${MLIR_INCLUDE_DIRS}) +include_directories(${LLVM_INCLUDE_DIRS}) +include_directories(${PROJECT_SOURCE_DIR}/include) +include_directories(${PROJECT_BINARY_DIR}/include) # Tablegen'd files +# link_directories(${LLVM_LIBRARY_DIR}) + +add_subdirectory(include) +add_subdirectory(lib) +add_subdirectory(bin) + +add_library(triton SHARED ${PYTHON_SRC}) + +# find_package(PythonLibs REQUIRED) + +set(TRITON_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}") +set(TRITON_BINARY_DIR "${CMAKE_CURRENT_BINARY_DIR}") + +get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) +get_property(conversion_libs GLOBAL PROPERTY MLIR_CONVERSION_LIBS) + +target_link_libraries(triton + TritonAnalysis + TritonTransforms + TritonGPUTransforms + TritonLLVMIR + TritonPTX + ${dialect_libs} + ${conversion_libs} + # optimizations + MLIRPass + MLIRTransforms + MLIRLLVMIR + MLIRSupport + MLIRTargetLLVMIRExport + MLIRExecutionEngine + MLIRMathToLLVM + MLIRNVVMToLLVMIRTranslation + MLIRIR +) target_link_options(triton PRIVATE ${LLVM_LDFLAGS}) @@ -159,7 +227,7 @@ else() endif() -if(BUILD_PYTHON_MODULE AND NOT WIN32) +if(TRITON_BUILD_PYTHON_MODULE AND NOT WIN32) set(CMAKE_SHARED_LIBRARY_SUFFIX ".so") # Check if the platform is MacOS if(APPLE) @@ -167,3 +235,7 @@ if(BUILD_PYTHON_MODULE AND NOT WIN32) endif() target_link_libraries(triton ${CUTLASS_LIBRARIES} ${PYTHON_LDFLAGS}) endif() + +add_subdirectory(test) + +add_subdirectory(unittest) diff --git a/README.md b/README.md index bab417daabad..8790b07d4715 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ # Triton -This is the development repository of Triton, a language and compiler for writing highly efficient custom Deep-Learning primitives. The aim of Triton is to provide an open-source environment for expressing tensor math workloads that offers high flexibility, developer productivity and end to end performance. +This is the development repository of Triton, a language and compiler for writing highly efficient custom Deep-Learning primitives. 
The aim of Triton is to provide an open-source environment to write fast code at higher productivity than CUDA, but also with higher flexibility than other existing DSLs. The foundations of this project are described in the following MAPL2019 publication: [Triton: An Intermediate Language and Compiler for Tiled Neural Network Computations](http://www.eecs.harvard.edu/~htk/publication/2019-mapl-tillet-kung-cox.pdf). Please consider citing this work if you use Triton! diff --git a/bin/CMakeLists.txt b/bin/CMakeLists.txt new file mode 100644 index 000000000000..7fb68f48af75 --- /dev/null +++ b/bin/CMakeLists.txt @@ -0,0 +1,60 @@ +add_subdirectory(FileCheck) +# add_llvm_executable(FileCheck FileCheck/FileCheck.cpp) +# target_link_libraries(FileCheck PRIVATE LLVMFileCheck LLVMSupport) + +get_property(dialect_libs GLOBAL PROPERTY MLIR_DIALECT_LIBS) +get_property(conversion_libs GLOBAL PROPERTY MLIR_CONVERSION_LIBS) + +add_llvm_executable(triton-opt triton-opt.cpp PARTIAL_SOURCES_INTENDED) + +# TODO: what's this? +llvm_update_compile_flags(triton-opt) +target_link_libraries(triton-opt PRIVATE + TritonAnalysis + TritonTransforms + TritonGPUTransforms + ${dialect_libs} + ${conversion_libs} + # tests + TritonTestAnalysis + # MLIR core + MLIROptLib + MLIRPass + MLIRTransforms +) + +mlir_check_all_link_libraries(triton-opt) + + +# add_llvm_executable(triton-translate triton-translate.cpp PARTIAL_SOURCES_INTENDED) +#llvm_update_compile_flags(triton-translate) +# target_link_libraries(triton-translate PRIVATE +# TritonAnalysis +# TritonTransforms +# TritonGPUTransforms +# TritonLLVMIR +# TritonDriver +# ${dialect_libs} +# ${conversion_libs} +# # tests +# TritonTestAnalysis + +# LLVMCore +# LLVMSupport +# LLVMOption +# LLVMCodeGen +# LLVMAsmParser + +# # MLIR core +# MLIROptLib +# MLIRIR +# MLIRPass +# MLIRSupport +# MLIRTransforms +# MLIRExecutionEngine +# MLIRMathToLLVM +# MLIRTransformUtils +# MLIRLLVMToLLVMIRTranslation +# MLIRNVVMToLLVMIRTranslation +# ) +# mlir_check_all_link_libraries(triton-translate) diff --git a/bin/FileCheck/CMakeLists.txt b/bin/FileCheck/CMakeLists.txt new file mode 100644 index 000000000000..1b683be51911 --- /dev/null +++ b/bin/FileCheck/CMakeLists.txt @@ -0,0 +1,2 @@ +add_llvm_executable(FileCheck FileCheck.cpp) +target_link_libraries(FileCheck PRIVATE LLVMFileCheck LLVMSupport) \ No newline at end of file diff --git a/bin/FileCheck/FileCheck.cpp b/bin/FileCheck/FileCheck.cpp new file mode 100644 index 000000000000..819efc35411c --- /dev/null +++ b/bin/FileCheck/FileCheck.cpp @@ -0,0 +1,882 @@ +//===- FileCheck.cpp - Check that File's Contents match what is expected --===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// FileCheck does a line-by line check of a file that validates whether it +// contains the expected content. This is useful for regression tests etc. +// +// This program exits with an exit status of 2 on error, exit status of 0 if +// the file matched the expected contents, and exit status of 1 if it did not +// contain the expected contents. 
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/FileCheck/FileCheck.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/InitLLVM.h"
+#include "llvm/Support/Process.h"
+#include "llvm/Support/WithColor.h"
+#include "llvm/Support/raw_ostream.h"
+#include <cmath>
+#include <map>
+using namespace llvm;
+
+static cl::extrahelp FileCheckOptsEnv(
+    "\nOptions are parsed from the environment variable FILECHECK_OPTS and\n"
+    "from the command line.\n");
+
+static cl::opt<std::string>
+    CheckFilename(cl::Positional, cl::desc("<check-file>"), cl::Optional);
+
+static cl::opt<std::string>
+    InputFilename("input-file", cl::desc("File to check (defaults to stdin)"),
+                  cl::init("-"), cl::value_desc("filename"));
+
+static cl::list<std::string> CheckPrefixes(
+    "check-prefix",
+    cl::desc("Prefix to use from check file (defaults to 'CHECK')"));
+static cl::alias CheckPrefixesAlias(
+    "check-prefixes", cl::aliasopt(CheckPrefixes), cl::CommaSeparated,
+    cl::NotHidden,
+    cl::desc(
+        "Alias for -check-prefix permitting multiple comma separated values"));
+
+static cl::list<std::string> CommentPrefixes(
+    "comment-prefixes", cl::CommaSeparated, cl::Hidden,
+    cl::desc("Comma-separated list of comment prefixes to use from check file\n"
+             "(defaults to 'COM,RUN'). Please avoid using this feature in\n"
+             "LLVM's LIT-based test suites, which should be easier to\n"
+             "maintain if they all follow a consistent comment style. This\n"
+             "feature is meant for non-LIT test suites using FileCheck."));
+
+static cl::opt<bool> NoCanonicalizeWhiteSpace(
+    "strict-whitespace",
+    cl::desc("Do not treat all horizontal whitespace as equivalent"));
+
+static cl::opt<bool> IgnoreCase("ignore-case",
+                                cl::desc("Use case-insensitive matching"));
+
+static cl::list<std::string> ImplicitCheckNot(
+    "implicit-check-not",
+    cl::desc("Add an implicit negative check with this pattern to every\n"
+             "positive check. This can be used to ensure that no instances of\n"
+             "this pattern occur which are not matched by a positive pattern"),
+    cl::value_desc("pattern"));
+
+static cl::list<std::string>
+    GlobalDefines("D", cl::AlwaysPrefix,
+                  cl::desc("Define a variable to be used in capture patterns."),
+                  cl::value_desc("VAR=VALUE"));
+
+static cl::opt<bool> AllowEmptyInput(
+    "allow-empty", cl::init(false),
+    cl::desc("Allow the input file to be empty. This is useful when making\n"
+             "checks that some error message does not occur, for example."));
+
+static cl::opt<bool> AllowUnusedPrefixes(
+    "allow-unused-prefixes", cl::init(false), cl::ZeroOrMore,
+    cl::desc("Allow prefixes to be specified but not appear in the test."));
+
+static cl::opt<bool> MatchFullLines(
+    "match-full-lines", cl::init(false),
+    cl::desc("Require all positive matches to cover an entire input line.\n"
+             "Allows leading and trailing whitespace if --strict-whitespace\n"
+             "is not also passed."));
+
+static cl::opt<bool> EnableVarScope(
+    "enable-var-scope", cl::init(false),
+    cl::desc("Enables scope for regex variables. Variables with names that\n"
+             "do not start with '$' will be reset at the beginning of\n"
+             "each CHECK-LABEL block."));
+
+static cl::opt<bool> AllowDeprecatedDagOverlap(
+    "allow-deprecated-dag-overlap", cl::init(false),
+    cl::desc("Enable overlapping among matches in a group of consecutive\n"
+             "CHECK-DAG directives. This option is deprecated and is only\n"
+             "provided for convenience as old tests are migrated to the new\n"
+             "non-overlapping CHECK-DAG implementation.\n"));
+
+static cl::opt<bool> Verbose(
+    "v", cl::init(false), cl::ZeroOrMore,
+    cl::desc("Print directive pattern matches, or add them to the input dump\n"
+             "if enabled.\n"));
+
+static cl::opt<bool> VerboseVerbose(
+    "vv", cl::init(false), cl::ZeroOrMore,
+    cl::desc("Print information helpful in diagnosing internal FileCheck\n"
+             "issues, or add it to the input dump if enabled. Implies\n"
+             "-v.\n"));
+
+// The order of DumpInputValue members affects their precedence, as documented
+// for -dump-input below.
+enum DumpInputValue {
+  DumpInputNever,
+  DumpInputFail,
+  DumpInputAlways,
+  DumpInputHelp
+};
+
+static cl::list<DumpInputValue> DumpInputs(
+    "dump-input",
+    cl::desc("Dump input to stderr, adding annotations representing\n"
+             "currently enabled diagnostics. When there are multiple\n"
+             "occurrences of this option, the <value> that appears earliest\n"
+             "in the list below has precedence. The default is 'fail'.\n"),
+    cl::value_desc("mode"),
+    cl::values(clEnumValN(DumpInputHelp, "help", "Explain input dump and quit"),
+               clEnumValN(DumpInputAlways, "always", "Always dump input"),
+               clEnumValN(DumpInputFail, "fail", "Dump input on failure"),
+               clEnumValN(DumpInputNever, "never", "Never dump input")));
+
+// The order of DumpInputFilterValue members affects their precedence, as
+// documented for -dump-input-filter below.
+enum DumpInputFilterValue {
+  DumpInputFilterError,
+  DumpInputFilterAnnotation,
+  DumpInputFilterAnnotationFull,
+  DumpInputFilterAll
+};
+
+static cl::list<DumpInputFilterValue> DumpInputFilters(
+    "dump-input-filter",
+    cl::desc("In the dump requested by -dump-input, print only input lines of\n"
+             "kind <value> plus any context specified by -dump-input-context.\n"
+             "When there are multiple occurrences of this option, the <value>\n"
+             "that appears earliest in the list below has precedence. The\n"
+             "default is 'error' when -dump-input=fail, and it's 'all' when\n"
+             "-dump-input=always.\n"),
+    cl::values(clEnumValN(DumpInputFilterAll, "all", "All input lines"),
+               clEnumValN(DumpInputFilterAnnotationFull, "annotation-full",
+                          "Input lines with annotations"),
+               clEnumValN(DumpInputFilterAnnotation, "annotation",
+                          "Input lines with starting points of annotations"),
+               clEnumValN(DumpInputFilterError, "error",
+                          "Input lines with starting points of error "
+                          "annotations")));
+
+static cl::list<unsigned> DumpInputContexts(
+    "dump-input-context", cl::value_desc("N"),
+    cl::desc("In the dump requested by -dump-input, print <N> input lines\n"
+             "before and <N> input lines after any lines specified by\n"
+             "-dump-input-filter. When there are multiple occurrences of\n"
+             "this option, the largest specified <N> has precedence. The\n"
+             "default is 5.\n"));
+
+typedef cl::list<std::string>::const_iterator prefix_iterator;
+
+static void DumpCommandLine(int argc, char **argv) {
+  errs() << "FileCheck command line: ";
+  for (int I = 0; I < argc; I++)
+    errs() << " " << argv[I];
+  errs() << "\n";
+}
+
+struct MarkerStyle {
+  /// The starting char (before tildes) for marking the line.
+  char Lead;
+  /// What color to use for this annotation.
+  raw_ostream::Colors Color;
+  /// A note to follow the marker, or empty string if none.
+  std::string Note;
+  /// Does this marker indicate inclusion by -dump-input-filter=error?
+  bool FiltersAsError;
+  MarkerStyle() {}
+  MarkerStyle(char Lead, raw_ostream::Colors Color,
+              const std::string &Note = "", bool FiltersAsError = false)
+      : Lead(Lead), Color(Color), Note(Note), FiltersAsError(FiltersAsError) {
+    assert((!FiltersAsError || !Note.empty()) &&
+           "expected error diagnostic to have note");
+  }
+};
+
+static MarkerStyle GetMarker(FileCheckDiag::MatchType MatchTy) {
+  switch (MatchTy) {
+  case FileCheckDiag::MatchFoundAndExpected:
+    return MarkerStyle('^', raw_ostream::GREEN);
+  case FileCheckDiag::MatchFoundButExcluded:
+    return MarkerStyle('!', raw_ostream::RED, "error: no match expected",
+                       /*FiltersAsError=*/true);
+  case FileCheckDiag::MatchFoundButWrongLine:
+    return MarkerStyle('!', raw_ostream::RED, "error: match on wrong line",
+                       /*FiltersAsError=*/true);
+  case FileCheckDiag::MatchFoundButDiscarded:
+    return MarkerStyle('!', raw_ostream::CYAN,
+                       "discard: overlaps earlier match");
+  case FileCheckDiag::MatchFoundErrorNote:
+    // Note should always be overridden within the FileCheckDiag.
+    return MarkerStyle('!', raw_ostream::RED,
+                       "error: unknown error after match",
+                       /*FiltersAsError=*/true);
+  case FileCheckDiag::MatchNoneAndExcluded:
+    return MarkerStyle('X', raw_ostream::GREEN);
+  case FileCheckDiag::MatchNoneButExpected:
+    return MarkerStyle('X', raw_ostream::RED, "error: no match found",
+                       /*FiltersAsError=*/true);
+  case FileCheckDiag::MatchNoneForInvalidPattern:
+    return MarkerStyle('X', raw_ostream::RED,
+                       "error: match failed for invalid pattern",
+                       /*FiltersAsError=*/true);
+  case FileCheckDiag::MatchFuzzy:
+    return MarkerStyle('?', raw_ostream::MAGENTA, "possible intended match",
+                       /*FiltersAsError=*/true);
+  }
+  llvm_unreachable_internal("unexpected match type");
+}
+
+static void DumpInputAnnotationHelp(raw_ostream &OS) {
+  OS << "The following description was requested by -dump-input=help to\n"
+     << "explain the input dump printed by FileCheck.\n"
+     << "\n"
+     << "Related command-line options:\n"
+     << "\n"
+     << " - -dump-input=<value> enables or disables the input dump\n"
+     << " - -dump-input-filter=<value> filters the input lines\n"
+     << " - -dump-input-context=<N> adjusts the context of filtered lines\n"
+     << " - -v and -vv add more annotations\n"
+     << " - -color forces colors to be enabled both in the dump and below\n"
+     << " - -help documents the above options in more detail\n"
+     << "\n"
+     << "These options can also be set via FILECHECK_OPTS. For example, for\n"
+     << "maximum debugging output on failures:\n"
+     << "\n"
+     << " $ FILECHECK_OPTS='-dump-input-filter=all -vv -color' ninja check\n"
+     << "\n"
+     << "Input dump annotation format:\n"
+     << "\n";
+
+  // Labels for input lines.
+  OS << " - ";
+  WithColor(OS, raw_ostream::SAVEDCOLOR, true) << "L:";
+  OS << " labels line number L of the input file\n"
+     << " An extra space is added after each input line to represent"
+     << " the\n"
+     << " newline character\n";
+
+  // Labels for annotation lines.
+  OS << " - ";
+  WithColor(OS, raw_ostream::SAVEDCOLOR, true) << "T:L";
+  OS << " labels the only match result for either (1) a pattern of type T"
+     << " from\n"
+     << " line L of the check file if L is an integer or (2) the"
+     << " I-th implicit\n"
+     << " pattern if L is \"imp\" followed by an integer "
+     << "I (index origin one)\n";
+  OS << " - ";
+  WithColor(OS, raw_ostream::SAVEDCOLOR, true) << "T:L'N";
+  OS << " labels the Nth match result for such a pattern\n";
+
+  // Markers on annotation lines.
+  OS << " - ";
+  WithColor(OS, raw_ostream::SAVEDCOLOR, true) << "^~~";
+  OS << " marks good match (reported if -v)\n"
+     << " - ";
+  WithColor(OS, raw_ostream::SAVEDCOLOR, true) << "!~~";
+  OS << " marks bad match, such as:\n"
+     << " - CHECK-NEXT on same line as previous match (error)\n"
+     << " - CHECK-NOT found (error)\n"
+     << " - CHECK-DAG overlapping match (discarded, reported if "
+     << "-vv)\n"
+     << " - ";
+  WithColor(OS, raw_ostream::SAVEDCOLOR, true) << "X~~";
+  OS << " marks search range when no match is found, such as:\n"
+     << " - CHECK-NEXT not found (error)\n"
+     << " - CHECK-NOT not found (success, reported if -vv)\n"
+     << " - CHECK-DAG not found after discarded matches (error)\n"
+     << " - ";
+  WithColor(OS, raw_ostream::SAVEDCOLOR, true) << "?";
+  OS << " marks fuzzy match when no match is found\n";
+
+  // Elided lines.
+  OS << " - ";
+  WithColor(OS, raw_ostream::SAVEDCOLOR, true) << "...";
+  OS << " indicates elided input lines and annotations, as specified by\n"
+     << " -dump-input-filter and -dump-input-context\n";
+
+  // Colors.
+  OS << " - colors ";
+  WithColor(OS, raw_ostream::GREEN, true) << "success";
+  OS << ", ";
+  WithColor(OS, raw_ostream::RED, true) << "error";
+  OS << ", ";
+  WithColor(OS, raw_ostream::MAGENTA, true) << "fuzzy match";
+  OS << ", ";
+  WithColor(OS, raw_ostream::CYAN, true, false) << "discarded match";
+  OS << ", ";
+  WithColor(OS, raw_ostream::CYAN, true, true) << "unmatched input";
+  OS << "\n";
+}
+
+/// An annotation for a single input line.
+struct InputAnnotation {
+  /// The index of the match result across all checks
+  unsigned DiagIndex;
+  /// The label for this annotation.
+  std::string Label;
+  /// Is this the initial fragment of a diagnostic that has been broken across
+  /// multiple lines?
+  bool IsFirstLine;
+  /// What input line (one-origin indexing) this annotation marks. This might
+  /// be different from the starting line of the original diagnostic if
+  /// !IsFirstLine.
+  unsigned InputLine;
+  /// The column range (one-origin indexing, open end) in which to mark the
+  /// input line. If InputEndCol is UINT_MAX, treat it as the last column
+  /// before the newline.
+  unsigned InputStartCol, InputEndCol;
+  /// The marker to use.
+  MarkerStyle Marker;
+  /// Whether this annotation represents a good match for an expected pattern.
+  bool FoundAndExpectedMatch;
+};
+
+/// Get an abbreviation for the check type.
+static std::string GetCheckTypeAbbreviation(Check::FileCheckType Ty) {
+  switch (Ty) {
+  case Check::CheckPlain:
+    if (Ty.getCount() > 1)
+      return "count";
+    return "check";
+  case Check::CheckNext:
+    return "next";
+  case Check::CheckSame:
+    return "same";
+  case Check::CheckNot:
+    return "not";
+  case Check::CheckDAG:
+    return "dag";
+  case Check::CheckLabel:
+    return "label";
+  case Check::CheckEmpty:
+    return "empty";
+  case Check::CheckComment:
+    return "com";
+  case Check::CheckEOF:
+    return "eof";
+  case Check::CheckBadNot:
+    return "bad-not";
+  case Check::CheckBadCount:
+    return "bad-count";
+  case Check::CheckNone:
+    llvm_unreachable("invalid FileCheckType");
+  }
+  llvm_unreachable("unknown FileCheckType");
+}
+
+static void
+BuildInputAnnotations(const SourceMgr &SM, unsigned CheckFileBufferID,
+                      const std::pair<unsigned, unsigned> &ImpPatBufferIDRange,
+                      const std::vector<FileCheckDiag> &Diags,
+                      std::vector<InputAnnotation> &Annotations,
+                      unsigned &LabelWidth) {
+  struct CompareSMLoc {
+    bool operator()(const SMLoc &LHS, const SMLoc &RHS) const {
+      return LHS.getPointer() < RHS.getPointer();
+    }
+  };
+  // How many diagnostics does each pattern have?
+ std::map DiagCountPerPattern; + for (auto Diag : Diags) + ++DiagCountPerPattern[Diag.CheckLoc]; + // How many diagnostics have we seen so far per pattern? + std::map DiagIndexPerPattern; + // How many total diagnostics have we seen so far? + unsigned DiagIndex = 0; + // What's the widest label? + LabelWidth = 0; + for (auto DiagItr = Diags.begin(), DiagEnd = Diags.end(); DiagItr != DiagEnd; + ++DiagItr) { + InputAnnotation A; + A.DiagIndex = DiagIndex++; + + // Build label, which uniquely identifies this check result. + unsigned CheckBufferID = SM.FindBufferContainingLoc(DiagItr->CheckLoc); + auto CheckLineAndCol = + SM.getLineAndColumn(DiagItr->CheckLoc, CheckBufferID); + llvm::raw_string_ostream Label(A.Label); + Label << GetCheckTypeAbbreviation(DiagItr->CheckTy) << ":"; + if (CheckBufferID == CheckFileBufferID) + Label << CheckLineAndCol.first; + else if (ImpPatBufferIDRange.first <= CheckBufferID && + CheckBufferID < ImpPatBufferIDRange.second) + Label << "imp" << (CheckBufferID - ImpPatBufferIDRange.first + 1); + else + llvm_unreachable("expected diagnostic's check location to be either in " + "the check file or for an implicit pattern"); + if (DiagCountPerPattern[DiagItr->CheckLoc] > 1) + Label << "'" << DiagIndexPerPattern[DiagItr->CheckLoc]++; + LabelWidth = std::max((std::string::size_type)LabelWidth, A.Label.size()); + + A.Marker = GetMarker(DiagItr->MatchTy); + if (!DiagItr->Note.empty()) { + A.Marker.Note = DiagItr->Note; + // It's less confusing if notes that don't actually have ranges don't have + // markers. For example, a marker for 'with "VAR" equal to "5"' would + // seem to indicate where "VAR" matches, but the location we actually have + // for the marker simply points to the start of the match/search range for + // the full pattern of which the substitution is potentially just one + // component. + if (DiagItr->InputStartLine == DiagItr->InputEndLine && + DiagItr->InputStartCol == DiagItr->InputEndCol) + A.Marker.Lead = ' '; + } + if (DiagItr->MatchTy == FileCheckDiag::MatchFoundErrorNote) { + assert(!DiagItr->Note.empty() && + "expected custom note for MatchFoundErrorNote"); + A.Marker.Note = "error: " + A.Marker.Note; + } + A.FoundAndExpectedMatch = + DiagItr->MatchTy == FileCheckDiag::MatchFoundAndExpected; + + // Compute the mark location, and break annotation into multiple + // annotations if it spans multiple lines. + A.IsFirstLine = true; + A.InputLine = DiagItr->InputStartLine; + A.InputStartCol = DiagItr->InputStartCol; + if (DiagItr->InputStartLine == DiagItr->InputEndLine) { + // Sometimes ranges are empty in order to indicate a specific point, but + // that would mean nothing would be marked, so adjust the range to + // include the following character. + A.InputEndCol = + std::max(DiagItr->InputStartCol + 1, DiagItr->InputEndCol); + Annotations.push_back(A); + } else { + assert(DiagItr->InputStartLine < DiagItr->InputEndLine && + "expected input range not to be inverted"); + A.InputEndCol = UINT_MAX; + Annotations.push_back(A); + for (unsigned L = DiagItr->InputStartLine + 1, E = DiagItr->InputEndLine; + L <= E; ++L) { + // If a range ends before the first column on a line, then it has no + // characters on that line, so there's nothing to render. 
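+        // For example, a diagnostic spanning lines 3-5 whose end column is 1
+        // covers no characters on line 5, so line 3 gets the primary
+        // annotation, line 4 gets a continuation, and line 5 gets nothing.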
+ if (DiagItr->InputEndCol == 1 && L == E) + break; + InputAnnotation B; + B.DiagIndex = A.DiagIndex; + B.Label = A.Label; + B.IsFirstLine = false; + B.InputLine = L; + B.Marker = A.Marker; + B.Marker.Lead = '~'; + B.Marker.Note = ""; + B.InputStartCol = 1; + if (L != E) + B.InputEndCol = UINT_MAX; + else + B.InputEndCol = DiagItr->InputEndCol; + B.FoundAndExpectedMatch = A.FoundAndExpectedMatch; + Annotations.push_back(B); + } + } + } +} + +static unsigned FindInputLineInFilter( + DumpInputFilterValue DumpInputFilter, unsigned CurInputLine, + const std::vector::iterator &AnnotationBeg, + const std::vector::iterator &AnnotationEnd) { + if (DumpInputFilter == DumpInputFilterAll) + return CurInputLine; + for (auto AnnotationItr = AnnotationBeg; AnnotationItr != AnnotationEnd; + ++AnnotationItr) { + switch (DumpInputFilter) { + case DumpInputFilterAll: + llvm_unreachable("unexpected DumpInputFilterAll"); + break; + case DumpInputFilterAnnotationFull: + return AnnotationItr->InputLine; + case DumpInputFilterAnnotation: + if (AnnotationItr->IsFirstLine) + return AnnotationItr->InputLine; + break; + case DumpInputFilterError: + if (AnnotationItr->IsFirstLine && AnnotationItr->Marker.FiltersAsError) + return AnnotationItr->InputLine; + break; + } + } + return UINT_MAX; +} + +/// To OS, print a vertical ellipsis (right-justified at LabelWidth) if it would +/// occupy less lines than ElidedLines, but print ElidedLines otherwise. Either +/// way, clear ElidedLines. Thus, if ElidedLines is empty, do nothing. +static void DumpEllipsisOrElidedLines(raw_ostream &OS, std::string &ElidedLines, + unsigned LabelWidth) { + if (ElidedLines.empty()) + return; + unsigned EllipsisLines = 3; + if (EllipsisLines < StringRef(ElidedLines).count('\n')) { + for (unsigned i = 0; i < EllipsisLines; ++i) { + WithColor(OS, raw_ostream::BLACK, /*Bold=*/true) + << right_justify(".", LabelWidth); + OS << '\n'; + } + } else + OS << ElidedLines; + ElidedLines.clear(); +} + +static void DumpAnnotatedInput(raw_ostream &OS, const FileCheckRequest &Req, + DumpInputFilterValue DumpInputFilter, + unsigned DumpInputContext, + StringRef InputFileText, + std::vector &Annotations, + unsigned LabelWidth) { + OS << "Input was:\n<<<<<<\n"; + + // Sort annotations. + llvm::sort(Annotations, + [](const InputAnnotation &A, const InputAnnotation &B) { + // 1. Sort annotations in the order of the input lines. + // + // This makes it easier to find relevant annotations while + // iterating input lines in the implementation below. FileCheck + // does not always produce diagnostics in the order of input + // lines due to, for example, CHECK-DAG and CHECK-NOT. + if (A.InputLine != B.InputLine) + return A.InputLine < B.InputLine; + // 2. Sort annotations in the temporal order FileCheck produced + // their associated diagnostics. + // + // This sort offers several benefits: + // + // A. On a single input line, the order of annotations reflects + // the FileCheck logic for processing directives/patterns. + // This can be helpful in understanding cases in which the + // order of the associated directives/patterns in the check + // file or on the command line either (i) does not match the + // temporal order in which FileCheck looks for matches for the + // directives/patterns (due to, for example, CHECK-LABEL, + // CHECK-NOT, or `--implicit-check-not`) or (ii) does match + // that order but does not match the order of those + // diagnostics along an input line (due to, for example, + // CHECK-DAG). 
+ // + // On the other hand, because our presentation format presents + // input lines in order, there's no clear way to offer the + // same benefit across input lines. For consistency, it might + // then seem worthwhile to have annotations on a single line + // also sorted in input order (that is, by input column). + // However, in practice, this appears to be more confusing + // than helpful. Perhaps it's intuitive to expect annotations + // to be listed in the temporal order in which they were + // produced except in cases the presentation format obviously + // and inherently cannot support it (that is, across input + // lines). + // + // B. When diagnostics' annotations are split among multiple + // input lines, the user must track them from one input line + // to the next. One property of the sort chosen here is that + // it facilitates the user in this regard by ensuring the + // following: when comparing any two input lines, a + // diagnostic's annotations are sorted in the same position + // relative to all other diagnostics' annotations. + return A.DiagIndex < B.DiagIndex; + }); + + // Compute the width of the label column. + const unsigned char *InputFilePtr = InputFileText.bytes_begin(), + *InputFileEnd = InputFileText.bytes_end(); + unsigned LineCount = InputFileText.count('\n'); + if (InputFileEnd[-1] != '\n') + ++LineCount; + unsigned LineNoWidth = std::log10(LineCount) + 1; + // +3 below adds spaces (1) to the left of the (right-aligned) line numbers + // on input lines and (2) to the right of the (left-aligned) labels on + // annotation lines so that input lines and annotation lines are more + // visually distinct. For example, the spaces on the annotation lines ensure + // that input line numbers and check directive line numbers never align + // horizontally. Those line numbers might not even be for the same file. + // One space would be enough to achieve that, but more makes it even easier + // to see. + LabelWidth = std::max(LabelWidth, LineNoWidth) + 3; + + // Print annotated input lines. + unsigned PrevLineInFilter = 0; // 0 means none so far + unsigned NextLineInFilter = 0; // 0 means uncomputed, UINT_MAX means none + std::string ElidedLines; + raw_string_ostream ElidedLinesOS(ElidedLines); + ColorMode TheColorMode = + WithColor(OS).colorsEnabled() ? ColorMode::Enable : ColorMode::Disable; + if (TheColorMode == ColorMode::Enable) + ElidedLinesOS.enable_colors(true); + auto AnnotationItr = Annotations.begin(), AnnotationEnd = Annotations.end(); + for (unsigned Line = 1; + InputFilePtr != InputFileEnd || AnnotationItr != AnnotationEnd; ++Line) { + const unsigned char *InputFileLine = InputFilePtr; + + // Compute the previous and next line included by the filter. + if (NextLineInFilter < Line) + NextLineInFilter = FindInputLineInFilter(DumpInputFilter, Line, + AnnotationItr, AnnotationEnd); + assert(NextLineInFilter && "expected NextLineInFilter to be computed"); + if (NextLineInFilter == Line) + PrevLineInFilter = Line; + + // Elide this input line and its annotations if it's not within the + // context specified by -dump-input-context of an input line included by + // -dump-input-filter. However, in case the resulting ellipsis would occupy + // more lines than the input lines and annotations it elides, buffer the + // elided lines and annotations so we can print them instead. 
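+    // For example, with the default -dump-input-context=5, an input line is
+    // kept if it lies within 5 lines of a line selected by -dump-input-filter;
+    // a longer run of elided lines is replaced by a short vertical ellipsis,
+    // but a run no taller than the ellipsis itself is printed verbatim.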
+ raw_ostream *LineOS = &OS; + if ((!PrevLineInFilter || PrevLineInFilter + DumpInputContext < Line) && + (NextLineInFilter == UINT_MAX || + Line + DumpInputContext < NextLineInFilter)) + LineOS = &ElidedLinesOS; + else { + LineOS = &OS; + DumpEllipsisOrElidedLines(OS, ElidedLinesOS.str(), LabelWidth); + } + + // Print right-aligned line number. + WithColor(*LineOS, raw_ostream::BLACK, /*Bold=*/true, /*BF=*/false, + TheColorMode) + << format_decimal(Line, LabelWidth) << ": "; + + // For the case where -v and colors are enabled, find the annotations for + // good matches for expected patterns in order to highlight everything + // else in the line. There are no such annotations if -v is disabled. + std::vector FoundAndExpectedMatches; + if (Req.Verbose && TheColorMode == ColorMode::Enable) { + for (auto I = AnnotationItr; I != AnnotationEnd && I->InputLine == Line; + ++I) { + if (I->FoundAndExpectedMatch) + FoundAndExpectedMatches.push_back(*I); + } + } + + // Print numbered line with highlighting where there are no matches for + // expected patterns. + bool Newline = false; + { + WithColor COS(*LineOS, raw_ostream::SAVEDCOLOR, /*Bold=*/false, + /*BG=*/false, TheColorMode); + bool InMatch = false; + if (Req.Verbose) + COS.changeColor(raw_ostream::CYAN, true, true); + for (unsigned Col = 1; InputFilePtr != InputFileEnd && !Newline; ++Col) { + bool WasInMatch = InMatch; + InMatch = false; + for (auto M : FoundAndExpectedMatches) { + if (M.InputStartCol <= Col && Col < M.InputEndCol) { + InMatch = true; + break; + } + } + if (!WasInMatch && InMatch) + COS.resetColor(); + else if (WasInMatch && !InMatch) + COS.changeColor(raw_ostream::CYAN, true, true); + if (*InputFilePtr == '\n') { + Newline = true; + COS << ' '; + } else + COS << *InputFilePtr; + ++InputFilePtr; + } + } + *LineOS << '\n'; + unsigned InputLineWidth = InputFilePtr - InputFileLine; + + // Print any annotations. + while (AnnotationItr != AnnotationEnd && AnnotationItr->InputLine == Line) { + WithColor COS(*LineOS, AnnotationItr->Marker.Color, /*Bold=*/true, + /*BG=*/false, TheColorMode); + // The two spaces below are where the ": " appears on input lines. + COS << left_justify(AnnotationItr->Label, LabelWidth) << " "; + unsigned Col; + for (Col = 1; Col < AnnotationItr->InputStartCol; ++Col) + COS << ' '; + COS << AnnotationItr->Marker.Lead; + // If InputEndCol=UINT_MAX, stop at InputLineWidth. + for (++Col; Col < AnnotationItr->InputEndCol && Col <= InputLineWidth; + ++Col) + COS << '~'; + const std::string &Note = AnnotationItr->Marker.Note; + if (!Note.empty()) { + // Put the note at the end of the input line. If we were to instead + // put the note right after the marker, subsequent annotations for the + // same input line might appear to mark this note instead of the input + // line. + for (; Col <= InputLineWidth; ++Col) + COS << ' '; + COS << ' ' << Note; + } + COS << '\n'; + ++AnnotationItr; + } + } + DumpEllipsisOrElidedLines(OS, ElidedLinesOS.str(), LabelWidth); + + OS << ">>>>>>\n"; +} + +int main(int argc, char **argv) { + // Enable use of ANSI color codes because FileCheck is using them to + // highlight text. + llvm::sys::Process::UseANSIEscapeCodes(true); + + InitLLVM X(argc, argv); + cl::ParseCommandLineOptions(argc, argv, /*Overview*/ "", /*Errs*/ nullptr, + "FILECHECK_OPTS"); + + // Select -dump-input* values. The -help documentation specifies the default + // value and which value to choose if an option is specified multiple times. 
+ // In the latter case, the general rule of thumb is to choose the value that + // provides the most information. + DumpInputValue DumpInput = + DumpInputs.empty() + ? DumpInputFail + : *std::max_element(DumpInputs.begin(), DumpInputs.end()); + DumpInputFilterValue DumpInputFilter; + if (DumpInputFilters.empty()) + DumpInputFilter = DumpInput == DumpInputAlways ? DumpInputFilterAll + : DumpInputFilterError; + else + DumpInputFilter = + *std::max_element(DumpInputFilters.begin(), DumpInputFilters.end()); + unsigned DumpInputContext = DumpInputContexts.empty() + ? 5 + : *std::max_element(DumpInputContexts.begin(), + DumpInputContexts.end()); + + if (DumpInput == DumpInputHelp) { + DumpInputAnnotationHelp(outs()); + return 0; + } + if (CheckFilename.empty()) { + errs() << " not specified\n"; + return 2; + } + + FileCheckRequest Req; + append_range(Req.CheckPrefixes, CheckPrefixes); + + append_range(Req.CommentPrefixes, CommentPrefixes); + + append_range(Req.ImplicitCheckNot, ImplicitCheckNot); + + bool GlobalDefineError = false; + for (StringRef G : GlobalDefines) { + size_t EqIdx = G.find('='); + if (EqIdx == std::string::npos) { + errs() << "Missing equal sign in command-line definition '-D" << G + << "'\n"; + GlobalDefineError = true; + continue; + } + if (EqIdx == 0) { + errs() << "Missing variable name in command-line definition '-D" << G + << "'\n"; + GlobalDefineError = true; + continue; + } + Req.GlobalDefines.push_back(G); + } + if (GlobalDefineError) + return 2; + + Req.AllowEmptyInput = AllowEmptyInput; + Req.AllowUnusedPrefixes = AllowUnusedPrefixes; + Req.EnableVarScope = EnableVarScope; + Req.AllowDeprecatedDagOverlap = AllowDeprecatedDagOverlap; + Req.Verbose = Verbose; + Req.VerboseVerbose = VerboseVerbose; + Req.NoCanonicalizeWhiteSpace = NoCanonicalizeWhiteSpace; + Req.MatchFullLines = MatchFullLines; + Req.IgnoreCase = IgnoreCase; + + if (VerboseVerbose) + Req.Verbose = true; + + FileCheck FC(Req); + if (!FC.ValidateCheckPrefixes()) + return 2; + + Regex PrefixRE = FC.buildCheckPrefixRegex(); + std::string REError; + if (!PrefixRE.isValid(REError)) { + errs() << "Unable to combine check-prefix strings into a prefix regular " + "expression! This is likely a bug in FileCheck's verification of " + "the check-prefix strings. Regular expression parsing failed " + "with the following error: " + << REError << "\n"; + return 2; + } + + SourceMgr SM; + + // Read the expected strings from the check file. + ErrorOr> CheckFileOrErr = + MemoryBuffer::getFileOrSTDIN(CheckFilename, /*IsText=*/true); + if (std::error_code EC = CheckFileOrErr.getError()) { + errs() << "Could not open check file '" << CheckFilename + << "': " << EC.message() << '\n'; + return 2; + } + MemoryBuffer &CheckFile = *CheckFileOrErr.get(); + + SmallString<4096> CheckFileBuffer; + StringRef CheckFileText = FC.CanonicalizeFile(CheckFile, CheckFileBuffer); + + unsigned CheckFileBufferID = + SM.AddNewSourceBuffer(MemoryBuffer::getMemBuffer( + CheckFileText, CheckFile.getBufferIdentifier()), + SMLoc()); + + std::pair ImpPatBufferIDRange; + if (FC.readCheckFile(SM, CheckFileText, PrefixRE, &ImpPatBufferIDRange)) + return 2; + + // Open the file to check and add it to SourceMgr. 
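+  // (getFileOrSTDIN treats a filename of "-" as standard input; the variable
+  // is rewritten below purely so that later diagnostics read more naturally.)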
+ ErrorOr> InputFileOrErr = + MemoryBuffer::getFileOrSTDIN(InputFilename, /*IsText=*/true); + if (InputFilename == "-") + InputFilename = ""; // Overwrite for improved diagnostic messages + if (std::error_code EC = InputFileOrErr.getError()) { + errs() << "Could not open input file '" << InputFilename + << "': " << EC.message() << '\n'; + return 2; + } + MemoryBuffer &InputFile = *InputFileOrErr.get(); + + if (InputFile.getBufferSize() == 0 && !AllowEmptyInput) { + errs() << "FileCheck error: '" << InputFilename << "' is empty.\n"; + DumpCommandLine(argc, argv); + return 2; + } + + SmallString<4096> InputFileBuffer; + StringRef InputFileText = FC.CanonicalizeFile(InputFile, InputFileBuffer); + + SM.AddNewSourceBuffer(MemoryBuffer::getMemBuffer( + InputFileText, InputFile.getBufferIdentifier()), + SMLoc()); + + std::vector Diags; + int ExitCode = FC.checkInput(SM, InputFileText, + DumpInput == DumpInputNever ? nullptr : &Diags) + ? EXIT_SUCCESS + : 1; + if (DumpInput == DumpInputAlways || + (ExitCode == 1 && DumpInput == DumpInputFail)) { + errs() << "\n" + << "Input file: " << InputFilename << "\n" + << "Check file: " << CheckFilename << "\n" + << "\n" + << "-dump-input=help explains the following input dump.\n" + << "\n"; + std::vector Annotations; + unsigned LabelWidth; + BuildInputAnnotations(SM, CheckFileBufferID, ImpPatBufferIDRange, Diags, + Annotations, LabelWidth); + DumpAnnotatedInput(errs(), Req, DumpInputFilter, DumpInputContext, + InputFileText, Annotations, LabelWidth); + } + + return ExitCode; +} diff --git a/bin/triton-opt.cpp b/bin/triton-opt.cpp new file mode 100644 index 000000000000..9f3b53b7ae41 --- /dev/null +++ b/bin/triton-opt.cpp @@ -0,0 +1,42 @@ +#include "triton/Dialect/Triton/IR/Dialect.h" +#include "triton/Dialect/TritonGPU/IR/Dialect.h" + +#include "triton/Dialect/Triton/Transforms/Passes.h" +#include "triton/Dialect/TritonGPU/Transforms/Passes.h" + +#include "triton/Conversion/Passes.h" + +#include "mlir/IR/Dialect.h" +#include "mlir/InitAllPasses.h" +#include "mlir/Support/MlirOptMain.h" + +namespace mlir { +namespace test { +void registerTestAliasPass(); +void registerTestAlignmentPass(); +void registerTestAllocationPass(); +void registerTestMembarPass(); +} // namespace test +} // namespace mlir + +int main(int argc, char **argv) { + mlir::registerAllPasses(); + mlir::registerTritonPasses(); + mlir::registerTritonGPUPasses(); + mlir::test::registerTestAliasPass(); + mlir::test::registerTestAlignmentPass(); + mlir::test::registerTestAllocationPass(); + mlir::test::registerTestMembarPass(); + mlir::triton::registerConvertTritonToTritonGPUPass(); + mlir::triton::registerConvertTritonGPUToLLVMPass(); + + // TODO: register Triton & TritonGPU passes + mlir::DialectRegistry registry; + registry.insert(); + + return mlir::asMainReturnCode(mlir::MlirOptMain( + argc, argv, "Triton (GPU) optimizer driver\n", registry)); +} diff --git a/bin/triton-translate.cpp b/bin/triton-translate.cpp new file mode 100644 index 000000000000..b5c9d5a28aa0 --- /dev/null +++ b/bin/triton-translate.cpp @@ -0,0 +1,131 @@ +#include "mlir/ExecutionEngine/ExecutionEngine.h" +#include "mlir/ExecutionEngine/OptUtils.h" +#include "mlir/IR/AsmState.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/Dialect.h" +#include "mlir/Parser.h" +#include "mlir/Pass/Pass.h" +#include "mlir/Pass/PassManager.h" +#include "mlir/Support/FileUtilities.h" +#include "mlir/Support/LogicalResult.h" +#include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" +#include "mlir/Target/LLVMIR/Export.h" 
+#include "triton/Conversion/TritonGPUToLLVM/TritonGPUToLLVMPass.h" +#include "triton/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.h" +#include "triton/Dialect/Triton/IR/Dialect.h" +#include "triton/Dialect/TritonGPU/IR/Dialect.h" +#include "triton/Target/LLVMIR/LLVMIRTranslation.h" +#include "triton/driver/llvm.h" +#include "llvm/IR/LLVMContext.h" +#include "llvm/Support/CommandLine.h" +#include "llvm/Support/InitLLVM.h" +#include "llvm/Support/SourceMgr.h" +#include "llvm/Support/ToolOutputFile.h" +#include + +namespace mlir { +namespace triton { + +OwningOpRef loadMLIRModule(llvm::StringRef inputFilename, + MLIRContext &context) { + std::string errorMessage; + auto input = openInputFile(inputFilename, &errorMessage); + if (!input) { + llvm::errs() << errorMessage << "\n"; + return nullptr; + } + + mlir::DialectRegistry registry; + registry.insert(); + + context.appendDialectRegistry(registry); + + auto processBuffer = [&](std::unique_ptr ownedBuffer) + -> OwningOpRef { + llvm::SourceMgr sourceMgr; + sourceMgr.AddNewSourceBuffer(std::move(ownedBuffer), SMLoc()); + + context.loadAllAvailableDialects(); + context.allowUnregisteredDialects(); + + OwningOpRef module(parseSourceFile(sourceMgr, &context)); + if (!module) { + llvm::errs() << "Parse MLIR file failed."; + return nullptr; + } + + return module; + }; + + auto module = processBuffer(std::move(input)); + if (!module) { + return nullptr; + } + + return module; +} + +LogicalResult tritonTranslateMain(int argc, char **argv, + llvm::StringRef toolName) { + static llvm::cl::opt inputFilename( + llvm::cl::Positional, llvm::cl::desc(""), + llvm::cl::init("-")); + + static llvm::cl::opt outputFilename( + "o", llvm::cl::desc("Output filename"), llvm::cl::value_desc("filename"), + llvm::cl::init("-")); + + static llvm::cl::opt targetKind( + "target", llvm::cl::desc(""), + llvm::cl::value_desc("target"), llvm::cl::init("llvmir")); + + static llvm::cl::opt SMArch("sm", llvm::cl::desc("sm arch"), + llvm::cl::init(80)); + + static llvm::cl::opt ptxVersion( + "ptx-version", llvm::cl::desc("PTX version"), llvm::cl::init(10000)); + + llvm::InitLLVM y(argc, argv); + + registerAsmPrinterCLOptions(); + registerMLIRContextCLOptions(); + llvm::cl::ParseCommandLineOptions(argc, argv, toolName); + + mlir::MLIRContext context; + auto module = loadMLIRModule(inputFilename, context); + if (!module) { + return failure(); + } + + std::string errorMessage; + auto output = openOutputFile(outputFilename, &errorMessage); + if (!output) { + llvm::errs() << errorMessage << "\n"; + return failure(); + } + + llvm::LLVMContext llvmContext; + auto llvmir = + translateTritonGPUToLLVMIR(&llvmContext, *module, SMArch.getValue()); + if (!llvmir) { + llvm::errs() << "Translate to LLVM IR failed"; + } + + if (targetKind == "llvmir") + llvm::outs() << *llvmir << '\n'; + else if (targetKind == "ptx") + llvm::outs() << ::triton::driver::llir_to_ptx( + llvmir.get(), SMArch.getValue(), ptxVersion.getValue()); + + return success(); +} + +} // namespace triton +} // namespace mlir + +int main(int argc, char **argv) { + return failed(mlir::triton::tritonTranslateMain( + argc, argv, "Triton Translate Testing Tool.")); +} diff --git a/deps/dlfcn-win32 b/deps/dlfcn-win32 deleted file mode 160000 index 522c301ec366..000000000000 --- a/deps/dlfcn-win32 +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 522c301ec366e9b42205ae21617780d37cc0e9f0 diff --git a/docs/conf.py b/docs/conf.py index 8a6fabce735f..4d62c5650270 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -45,7 +45,7 @@ def 
forward_jit_fn(func): def wrapped(obj, **kwargs): import triton - if isinstance(obj, triton.runtime.JITFunction): + if isinstance(obj, triton.code_gen.JITFunction): obj = obj.fn return old(obj) @@ -56,7 +56,7 @@ def wrapped(obj, **kwargs): def documenter(app, obj, parent): import triton - if isinstance(obj, triton.runtime.JITFunction): + if isinstance(obj, triton.code_gen.JITFunction): obj = obj.fn return old_documenter(app, obj, parent) diff --git a/docs/getting-started/installation.rst b/docs/getting-started/installation.rst index 94a8e958a96f..20c4628bc8d4 100644 --- a/docs/getting-started/installation.rst +++ b/docs/getting-started/installation.rst @@ -34,13 +34,11 @@ You can install the Python package from source by running the following commands .. code-block:: bash git clone https://github.com/openai/triton.git; - cd triton; - git submodule update --init --recursive; - cd python; + cd triton/python; pip install cmake; # build time dependency pip install -e . -Note that, if llvm-11 is not present on your system and you are on linux, the setup.py script will download the official LLVM11 static libraries link against that. For windows users, LLVM must be installed and configured in PATH. +Note that, if llvm-11 is not present on your system, the setup.py script will download the official LLVM11 static libraries link against that. You can then test your installation by running the unit tests: diff --git a/docs/programming-guide/chapter-2/related-work.rst b/docs/programming-guide/chapter-2/related-work.rst index e21ec4de74f1..3e2c0c8c5488 100644 --- a/docs/programming-guide/chapter-2/related-work.rst +++ b/docs/programming-guide/chapter-2/related-work.rst @@ -168,7 +168,7 @@ Scheduling languages are, without a doubt, one of the most popular approaches fo Limitations ++++++++++++ -This ease-of-development comes at a cost. First of all, existing systems that follow this paradigm tend to be noticeably slower than Triton on modern hardware when applicable (e.g., V100/A100 tensor cores w/ equal tile sizes). I do believe that this is not a fundamental issue of scheduling languages -- in the sense that it could probably be solved with more efforts -- but it could mean that these systems are harder to engineer. More importantly, existing scheduling languages generate loops whose bounds and increments cannot depend on surrounding loop indice without at least imposing severe constraints on possible schedules -- if not breaking the system entirely. This is problematic for sparse computations, whose iteration spaces may be irregular. +This ease-of-development comes at a cost. First of all, existing systems that follow this paradigm tend to be noticeably slower than Triton on modern hardware when applicable (e.g., V100/A100 tensor cores w/ equal tile sizes). I do believe that this is not a fundamental issue of scheduling languages -- in the sense that it could probably be solved with more efforts -- but it could mean that these systems are harder to engineer. More importantly, existing scheduling languages generate loops whose bounds and increments cannot depend on surrounding loop indices without at least imposing severe constraints on possible schedules -- if not breaking the system entirely. This is problematic for sparse computations, whose iteration spaces may be irregular. .. 
table:: :widths: 50 50 diff --git a/docs/python-api/triton.language.rst b/docs/python-api/triton.language.rst index 18bf95be4a7c..1f05ce8a6510 100644 --- a/docs/python-api/triton.language.rst +++ b/docs/python-api/triton.language.rst @@ -106,13 +106,9 @@ Atomic Ops :nosignatures: atomic_cas - atomic_xchg atomic_add atomic_max atomic_min - atomic_and - atomic_or - atomic_xor Comparison ops diff --git a/include/CMakeLists.txt b/include/CMakeLists.txt new file mode 100644 index 000000000000..109c292fea8e --- /dev/null +++ b/include/CMakeLists.txt @@ -0,0 +1 @@ +add_subdirectory(triton) diff --git a/include/triton/Analysis/Alias.h b/include/triton/Analysis/Alias.h new file mode 100644 index 000000000000..fa6b906fc9bb --- /dev/null +++ b/include/triton/Analysis/Alias.h @@ -0,0 +1,80 @@ +#ifndef TRITON_ANALYSIS_ALIAS_H +#define TRITON_ANALYSIS_ALIAS_H + +#include "mlir/Analysis/AliasAnalysis.h" +#include "mlir/Analysis/DataFlowAnalysis.h" +#include "llvm/ADT/DenseSet.h" + +namespace mlir { + +class AliasInfo { +public: + AliasInfo() = default; + AliasInfo(Value value) { insert(value); } + + void insert(Value value) { allocs.insert(value); } + + const DenseSet &getAllocs() const { return allocs; } + + bool operator==(const AliasInfo &other) const { + return allocs == other.allocs; + } + + /// The pessimistic value state of a value without alias + static AliasInfo getPessimisticValueState(MLIRContext *context) { + return AliasInfo(); + } + static AliasInfo getPessimisticValueState(Value value) { return AliasInfo(); } + + /// The union of both arguments + static AliasInfo join(const AliasInfo &lhs, const AliasInfo &rhs); + +private: + /// The set of allocated values that are aliased by this lattice. + /// For now, we only consider aliased value produced by the following + /// situations: + /// 1. values returned by scf.yield + /// 2. block arguments in scf.for + /// Example: + /// alloc v1 alloc v2 + /// | | + /// |--------------| |------------| + /// scf.for v3 scf.for v4 scf.for v5 + /// | + /// scf.yield v6 + /// + /// v1's alloc [v1] + /// v2's alloc [v2] + /// v3's alloc [v1] + /// v4's alloc [v1, v2] + /// v5's alloc [v2] + /// v6's alloc [v1] + /// + /// Therefore, v1's liveness range is the union of v3, v4, and v6 + /// v2's liveness range is the union of v4 and v5. + DenseSet allocs; +}; + +//===----------------------------------------------------------------------===// +// Shared Memory Alias Analysis +//===----------------------------------------------------------------------===// +class SharedMemoryAliasAnalysis : public ForwardDataFlowAnalysis { +public: + using ForwardDataFlowAnalysis::ForwardDataFlowAnalysis; + + /// XXX(Keren): Compatible interface with MLIR AliasAnalysis for future use. + /// Given two values, returns their aliasing behavior. + AliasResult alias(Value lhs, Value rhs); + + /// Returns the modify-reference behavior of `op` on `location`. + ModRefResult getModRef(Operation *op, Value location); + + /// Computes if the alloc set of the results are changed. 
+ ChangeResult + visitOperation(Operation *op, + ArrayRef *> operands) override; +}; + +} // namespace mlir + +#endif // TRITON_ANALYSIS_ALIAS_H diff --git a/include/triton/Analysis/Allocation.h b/include/triton/Analysis/Allocation.h new file mode 100644 index 000000000000..60151d5b69e9 --- /dev/null +++ b/include/triton/Analysis/Allocation.h @@ -0,0 +1,192 @@ +#ifndef TRITON_ANALYSIS_ALLOCATION_H +#define TRITON_ANALYSIS_ALLOCATION_H + +#include "llvm/ADT/DenseMap.h" +#include "llvm/ADT/MapVector.h" +#include "llvm/ADT/SetVector.h" +#include "llvm/Support/raw_ostream.h" + +#include "triton/Dialect/Triton/IR/Dialect.h" +#include "triton/Dialect/TritonGPU/IR/Dialect.h" +#include +#include + +namespace mlir { + +namespace triton { +class AllocationAnalysis; + +SmallVector +getScratchConfigForCvtLayout(triton::gpu::ConvertLayoutOp op, unsigned &inVec, + unsigned &outVec); + +} // namespace triton + +/// Modified from llvm-15.0: llvm/ADT/AddressRanges.h +/// A class that represents an interval, specified using a start and an end +/// values: [Start, End). +template class Interval { +public: + Interval() {} + Interval(T S, T E) : Start(S), End(E) { assert(Start <= End); } + T start() const { return Start; } + T end() const { return End; } + T size() const { return End - Start; } + bool contains(T Addr) const { return Start <= Addr && Addr < End; } + bool intersects(const Interval &R) const { + return Start < R.End && R.Start < End; + } + bool operator==(const Interval &R) const { + return Start == R.Start && End == R.End; + } + bool operator!=(const Interval &R) const { return !(*this == R); } + bool operator<(const Interval &R) const { + return std::make_pair(Start, End) < std::make_pair(R.Start, R.End); + } + +private: + T Start = std::numeric_limits::min(); + T End = std::numeric_limits::max(); +}; + +class Allocation { +public: + /// A unique identifier for shared memory buffers + using BufferId = size_t; + using BufferIdSetT = DenseSet; + + static constexpr BufferId InvalidBufferId = + std::numeric_limits::max(); + + /// Creates a new Allocation analysis that computes the shared memory + /// information for all associated shared memory values. + Allocation(Operation *operation) : operation(operation) { run(); } + + /// Returns the operation this analysis was constructed from. + Operation *getOperation() const { return operation; } + + /// Returns the offset of the given buffer in the shared memory. + size_t getOffset(BufferId bufferId) const { + return bufferSet.lookup(bufferId).offset; + } + + /// Returns the size of the given buffer in the shared memory. + size_t getAllocatedSize(BufferId bufferId) const { + return bufferSet.lookup(bufferId).size; + } + + /// Returns the buffer id of the given value. + /// This interface only returns the allocated buffer id. + /// If you want to get all the buffer ids that are associated with the given + /// value, including alias buffers, use getBufferIds. + BufferId getBufferId(Value value) const { + if (valueBuffer.count(value)) { + return valueBuffer.lookup(value)->id; + } else { + return InvalidBufferId; + } + } + + /// Returns all the buffer ids of the given value, including alias buffers. 
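+  /// For example, if %v is the result of an scf.for that yields either %a1 or
+  /// %a2 (both shared-memory allocations), getBufferId(%v) returns
+  /// InvalidBufferId because %v is not itself an allocation, whereas
+  /// getBufferIds(%v) returns the ids of both %a1 and %a2 via the alias map.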
+ BufferIdSetT getBufferIds(Value value) const { + BufferIdSetT bufferIds; + auto allocBufferId = getBufferId(value); + if (allocBufferId != InvalidBufferId) + bufferIds.insert(allocBufferId); + for (auto *buffer : aliasBuffer.lookup(value)) { + if (buffer->id != InvalidBufferId) + bufferIds.insert(buffer->id); + } + return bufferIds; + } + + /// Returns the scratch buffer id of the given value. + BufferId getBufferId(Operation *operation) const { + if (opScratch.count(operation)) { + return opScratch.lookup(operation)->id; + } else { + return InvalidBufferId; + } + } + + /// Returns the size of total shared memory allocated + size_t getSharedMemorySize() const { return sharedMemorySize; } + + bool isIntersected(BufferId lhsId, BufferId rhsId) const { + if (lhsId == InvalidBufferId || rhsId == InvalidBufferId) + return false; + auto lhsBuffer = bufferSet.lookup(lhsId); + auto rhsBuffer = bufferSet.lookup(rhsId); + return lhsBuffer.intersects(rhsBuffer); + } + +private: + /// A class that represents a shared memory buffer + struct BufferT { + enum class BufferKind { Explicit, Scratch }; + + /// MT: thread-safe + inline static std::atomic nextId = 0; + + BufferKind kind; + BufferId id; + size_t size; + size_t offset; + + bool operator==(const BufferT &other) const { return id == other.id; } + bool operator<(const BufferT &other) const { return id < other.id; } + + BufferT() : BufferT(BufferKind::Explicit) {} + BufferT(BufferKind kind) : BufferT(kind, 0, 0) {} + BufferT(BufferKind kind, size_t size) : BufferT(kind, size, 0) {} + BufferT(BufferKind kind, size_t size, size_t offset) + : kind(kind), id(nextId++), size(size), offset(offset) {} + + bool intersects(const BufferT &other) const { + return Interval(offset, offset + size) + .intersects( + Interval(other.offset, other.offset + other.size)); + } + }; + + /// Op -> Scratch Buffer + using OpScratchMapT = DenseMap; + /// Value -> Explicit Buffer + using ValueBufferMapT = llvm::MapVector; + /// Value -> Alias Buffer + using AliasBufferMapT = llvm::MapVector>; + /// BufferId -> Buffer + using BufferSetT = DenseMap; + /// Runs allocation analysis on the given top-level operation. 
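+  /// run() is invoked from the constructor, so typical use is simply a sketch
+  /// like the following, where funcOp and someAlloc stand for whatever
+  /// top-level op and shared-memory value the caller cares about:
+  ///
+  ///   Allocation allocation(funcOp);
+  ///   size_t total = allocation.getSharedMemorySize();
+  ///   size_t off = allocation.getOffset(allocation.getBufferId(someAlloc));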
+ void run(); + +private: + template + void addBuffer(KeyType &key, Args &&...args) { + auto buffer = BufferT(Kind, std::forward(args)...); + bufferSet[buffer.id] = std::move(buffer); + if constexpr (Kind == BufferT::BufferKind::Explicit) { + valueBuffer[key] = &bufferSet[buffer.id]; + } else { + opScratch[key] = &bufferSet[buffer.id]; + } + } + + void addAlias(Value value, Value alloc) { + aliasBuffer[value].insert(valueBuffer[alloc]); + } + +private: + Operation *operation; + OpScratchMapT opScratch; + ValueBufferMapT valueBuffer; + AliasBufferMapT aliasBuffer; + BufferSetT bufferSet; + size_t sharedMemorySize = 0; + + friend class triton::AllocationAnalysis; +}; + +} // namespace mlir + +#endif // TRITON_ANALYSIS_ALLOCATION_H diff --git a/include/triton/Analysis/AxisInfo.h b/include/triton/Analysis/AxisInfo.h new file mode 100644 index 000000000000..f9cb2e66b759 --- /dev/null +++ b/include/triton/Analysis/AxisInfo.h @@ -0,0 +1,144 @@ +#ifndef TRITON_ANALYSIS_AXISINFO_H +#define TRITON_ANALYSIS_AXISINFO_H + +#include "mlir/Analysis/DataFlowAnalysis.h" +#include "llvm/Support/raw_ostream.h" +#include + +#include "triton/Dialect/Triton/IR/Dialect.h" +#include "triton/Dialect/TritonGPU/IR/Dialect.h" + +namespace mlir { + +//===----------------------------------------------------------------------===// +// AxisInfo +//===----------------------------------------------------------------------===// + +/// This lattice value represents known information on the axes of a lattice. +/// Axis information is represented by a std::map +class AxisInfo { +public: + typedef SmallVector DimVectorT; + +public: + // Default constructor + AxisInfo() : AxisInfo({}, {}, {}) {} + // Construct contiguity info with known contiguity + AxisInfo(DimVectorT knownContiguity, DimVectorT knownDivisibility, + DimVectorT knownConstancy) + : contiguity(knownContiguity), divisibility(knownDivisibility), + constancy(knownConstancy), rank(contiguity.size()) { + assert(knownDivisibility.size() == (size_t)rank); + assert(knownConstancy.size() == (size_t)rank); + } + + // Accessors + int getContiguity(size_t d) const { return contiguity[d]; } + const DimVectorT &getContiguity() const { return contiguity; } + + int getDivisibility(size_t d) const { return divisibility[d]; } + const DimVectorT &getDivisibility() const { return divisibility; } + + int getConstancy(size_t d) const { return constancy[d]; } + const DimVectorT &getConstancy() const { return constancy; } + + int getRank() const { return rank; } + + // Comparison + bool operator==(const AxisInfo &other) const { + return (contiguity == other.contiguity) && + (divisibility == other.divisibility) && + (constancy == other.constancy); + } + + /// The pessimistic value state of the contiguity is unknown. + static AxisInfo getPessimisticValueState(MLIRContext *context) { + return AxisInfo(); + } + static AxisInfo getPessimisticValueState(Value value); + + // The gcd of both arguments for each dimension + static AxisInfo join(const AxisInfo &lhs, const AxisInfo &rhs); + +private: + /// The _contiguity_ information maps the `d`-th + /// dimension to the length of the shortest + /// sequence of contiguous integers along it + /// For example: + /// [10, 11, 12, 13, 18, 19, 20, 21] + /// [20, 21, 22, 23, 28, 29, 30, 31] + /// Would have contiguity [1, 4]. + /// and + /// [12, 16, 20, 24] + /// [13, 17, 21, 25] + /// [14, 18, 22, 26] + /// [15, 19, 23, 27] + /// [18, 22, 26, 30] + /// [19, 23, 27, 31] + /// Would have contiguity [2, 1]. 
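+  /// As a simpler 1-D illustration, [0, 1, 2, 3, 4, 5, 6, 7] would have
+  /// contiguity [8], while [0, 2, 4, 6] would have contiguity [1].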
+ DimVectorT contiguity; + + /// The _divisibility_ information maps the `d`-th + /// dimension to the largest power-of-two that + /// divides the first element of all the values along it + /// For example: + /// [10, 11, 12, 13, 18, 19, 20, 21] + /// [20, 21, 22, 23, 28, 29, 30, 31] + // would have divisibility [1, 2] + // and + /// [12, 16, 20, 24] + /// [13, 17, 21, 25] + /// [14, 18, 22, 26] + /// [15, 19, 23, 27] + // would have divisibility [4, 1] + DimVectorT divisibility; + + /// The _constancy_ information maps the `d`-th + /// dimension to the length of the shortest + /// sequence of constant integer along it. This is + /// particularly useful to infer the contiguity + /// of operations (e.g., add) involving a constant + /// For example + /// [8, 8, 8, 8, 12, 12, 12, 12] + /// [16, 16, 16, 16, 20, 20, 20, 20] + /// would have constancy [1, 4] + DimVectorT constancy; + + // number of dimensions of the lattice + int rank; +}; + +class AxisInfoAnalysis : public ForwardDataFlowAnalysis { + +private: + static const int maxPow2Divisor = 65536; + + int highestPowOf2Divisor(int n) { + if (n == 0) + return maxPow2Divisor; + return (n & (~(n - 1))); + } + + AxisInfo visitBinaryOp( + Operation *op, AxisInfo lhsInfo, AxisInfo rhsInfo, + const std::function &getContiguity, + const std::function &getDivisibility, + const std::function &getConstancy); + +public: + using ForwardDataFlowAnalysis::ForwardDataFlowAnalysis; + + ChangeResult + visitOperation(Operation *op, + ArrayRef *> operands) override; + + unsigned getPtrVectorSize(Value ptr); + + unsigned getPtrAlignment(Value ptr); + + unsigned getMaskAlignment(Value mask); +}; + +} // namespace mlir + +#endif \ No newline at end of file diff --git a/include/triton/Analysis/Membar.h b/include/triton/Analysis/Membar.h new file mode 100644 index 000000000000..ceb192753ae6 --- /dev/null +++ b/include/triton/Analysis/Membar.h @@ -0,0 +1,119 @@ +#ifndef TRITON_ANALYSIS_MEMBAR_H +#define TRITON_ANALYSIS_MEMBAR_H + +#include "Allocation.h" +#include "llvm/ADT/SmallPtrSet.h" + +namespace mlir { + +class OpBuilder; + +//===----------------------------------------------------------------------===// +// Shared Memory Barrier Analysis +//===----------------------------------------------------------------------===// +class MembarAnalysis { +public: + /// Creates a new Membar analysis that generates the shared memory barrier + /// in the following circumstances: + /// - RAW: If a shared memory write is followed by a shared memory read, and + /// their addresses are intersected, a barrier is inserted. + /// - WAR: If a shared memory read is followed by a shared memory read, and + /// their addresses are intersected, a barrier is inserted. + /// The following circumstances do not require a barrier: + /// - WAW: not possible because overlapped memory allocation is not allowed. + /// - RAR: no write is performed. + /// Temporary storage of operations such as Reduce are considered as both + /// a shared memory read. If the temporary storage is written but not read, + /// it is considered as the problem of the operation itself but not the membar + /// analysis. + /// The following circumstances are not considered yet: + /// - Double buffers + /// - N buffers + MembarAnalysis(Allocation *allocation) : allocation(allocation) {} + + /// Runs the membar analysis to the given operation, inserts a barrier if + /// necessary. 
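+  /// For example (sketch): if one op writes a tile to shared memory and a
+  /// later op reads a buffer whose interval intersects that write, run()
+  /// inserts a barrier between them so the read observes the completed write
+  /// (the RAW case above); the read-then-write case is handled symmetrically.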
+ void run(); + +private: + struct RegionInfo { + using BufferIdSetT = Allocation::BufferIdSetT; + + BufferIdSetT syncReadBuffers; + BufferIdSetT syncWriteBuffers; + + RegionInfo() = default; + RegionInfo(const BufferIdSetT &syncReadBuffers, + const BufferIdSetT &syncWriteBuffers) + : syncReadBuffers(syncReadBuffers), syncWriteBuffers(syncWriteBuffers) { + } + + /// Unions two RegionInfo objects. + void join(const RegionInfo &other) { + syncReadBuffers.insert(other.syncReadBuffers.begin(), + other.syncReadBuffers.end()); + syncWriteBuffers.insert(other.syncWriteBuffers.begin(), + other.syncWriteBuffers.end()); + } + + /// Returns true if buffers in two RegionInfo objects are intersected. + bool isIntersected(const RegionInfo &other, Allocation *allocation) const { + return /*RAW*/ isIntersected(syncWriteBuffers, other.syncReadBuffers, + allocation) || + /*WAR*/ + isIntersected(syncReadBuffers, other.syncWriteBuffers, + allocation) || + /*WAW*/ + isIntersected(syncWriteBuffers, other.syncWriteBuffers, + allocation); + } + + /// Clears the buffers because a barrier is inserted. + void sync() { + syncReadBuffers.clear(); + syncWriteBuffers.clear(); + } + + private: + /// Returns true if buffers in two sets are intersected. + bool isIntersected(const BufferIdSetT &lhs, const BufferIdSetT &rhs, + Allocation *allocation) const { + return std::any_of(lhs.begin(), lhs.end(), [&](auto lhsId) { + return std::any_of(rhs.begin(), rhs.end(), [&](auto rhsId) { + return allocation->isIntersected(lhsId, rhsId); + }); + }); + } + }; + + /// Applies the barrier analysis based on the SCF dialect, in which each + /// region has a single basic block only. + /// Example: + /// region1 + /// op1 + /// op2 (scf.if) + /// region2 + /// op3 + /// op4 + /// region3 + /// op5 + /// op6 + /// op7 + /// region2 and region3 started with the information of region1. + /// Each region is analyzed separately and keeps their own copy of the + /// information. At op7, we union the information of the region2 and region3 + /// and update the information of region1. + void dfsOperation(Operation *operation, RegionInfo *blockInfo, + OpBuilder *builder); + + /// Updates the RegionInfo operation based on the operation. 
+ void transfer(Operation *operation, RegionInfo *blockInfo, + OpBuilder *builder); + +private: + Allocation *allocation; +}; + +} // namespace mlir + +#endif // TRITON_ANALYSIS_MEMBAR_H diff --git a/include/triton/Analysis/Utility.h b/include/triton/Analysis/Utility.h new file mode 100644 index 000000000000..10e98d260276 --- /dev/null +++ b/include/triton/Analysis/Utility.h @@ -0,0 +1,82 @@ +#ifndef TRITON_ANALYSIS_UTILITY_H +#define TRITON_ANALYSIS_UTILITY_H + +#include "triton/Dialect/TritonGPU/IR/Dialect.h" +#include +#include +#include + +namespace mlir { + +class ReduceOpHelper { +public: + explicit ReduceOpHelper(triton::ReduceOp op) : op(op) { + srcTy = op.operand().getType().cast(); + } + + ArrayRef getSrcShape() { return srcTy.getShape(); } + + Attribute getSrcLayout() { return srcTy.getEncoding(); } + + bool isFastReduction(); + + unsigned getInterWarpSize(); + + unsigned getIntraWarpSize(); + + unsigned getThreadsReductionAxis(); + + SmallVector getScratchConfigBasic(); + + SmallVector> getScratchConfigsFast(); + + unsigned getScratchSizeInBytes(); + +private: + triton::ReduceOp op; + RankedTensorType srcTy{}; +}; + +bool isSharedEncoding(Value value); + +bool maybeSharedAllocationOp(Operation *op); + +bool maybeAliasOp(Operation *op); + +bool supportMMA(triton::DotOp op, int version); + +bool supportMMA(Value value, int version); + +Type getElementType(Value value); + +std::string getValueOperandName(Value value, AsmState &state); + +template +inline SmallVector convertType(ArrayRef in) { + SmallVector out; + for (const T_IN &i : in) + out.push_back(T_OUT(i)); + return out; +} + +template Int product(llvm::ArrayRef arr) { + return std::accumulate(arr.begin(), arr.end(), 1, std::multiplies{}); +} + +template Int ceil(Int m, Int n) { return (m + n - 1) / n; } + +// output[i] = input[order[i]] +template +SmallVector reorder(ArrayRef input, ArrayRef order) { + size_t rank = order.size(); + assert(input.size() == rank); + SmallVector result(rank); + for (auto it : llvm::enumerate(order)) { + result[it.index()] = input[it.value()]; + } + return result; +} + +} // namespace mlir + +#endif // TRITON_ANALYSIS_UTILITY_H diff --git a/include/triton/CMakeLists.txt b/include/triton/CMakeLists.txt new file mode 100644 index 000000000000..b5f579c1a8c8 --- /dev/null +++ b/include/triton/CMakeLists.txt @@ -0,0 +1,2 @@ +add_subdirectory(Conversion) +add_subdirectory(Dialect) \ No newline at end of file diff --git a/include/triton/Conversion/CMakeLists.txt b/include/triton/Conversion/CMakeLists.txt new file mode 100644 index 000000000000..e25b0da63307 --- /dev/null +++ b/include/triton/Conversion/CMakeLists.txt @@ -0,0 +1,4 @@ + +set(LLVM_TARGET_DEFINITIONS Passes.td) +mlir_tablegen(Passes.h.inc -gen-pass-decls) +add_public_tablegen_target(TritonConversionPassIncGen) \ No newline at end of file diff --git a/include/triton/Conversion/MLIRTypes.h b/include/triton/Conversion/MLIRTypes.h new file mode 100644 index 000000000000..87421f0a8401 --- /dev/null +++ b/include/triton/Conversion/MLIRTypes.h @@ -0,0 +1,40 @@ +#ifndef TRITON_CONVERSION_MLIR_TYPES_H_ +#define TRITON_CONVERSION_MLIR_TYPES_H_ + +#include "mlir/Transforms/DialectConversion.h" +#include "triton/Dialect/TritonGPU/IR/Dialect.h" + +// This file redefines some common MLIR types for easy usage. 
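+//
+// For example, a conversion pattern can write the following sketch (here
+// `rewriter` stands for whatever ConversionPatternRewriter is in scope):
+//
+//   Type i32 = triton::type::i32Ty(rewriter.getContext());
+//   Type f16 = triton::type::f16Ty(rewriter.getContext());
+//
+// instead of spelling out IntegerType::get(...) and FloatType::getF16(...) at
+// every call site.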
+namespace mlir { +namespace triton { +namespace type { + +// Integer types +// TODO(Superjomn): may change `static` into better implementations +static Type i32Ty(MLIRContext *ctx) { return IntegerType::get(ctx, 32); } +static Type i16Ty(MLIRContext *ctx) { return IntegerType::get(ctx, 16); } +static Type i8Ty(MLIRContext *ctx) { return IntegerType::get(ctx, 8); } +static Type u32Ty(MLIRContext *ctx) { + return IntegerType::get(ctx, 32, IntegerType::Unsigned); +} +static Type u1Ty(MLIRContext *ctx) { + return IntegerType::get(ctx, 1, IntegerType::Unsigned); +} + +// Float types +static Type f16Ty(MLIRContext *ctx) { return FloatType::getF16(ctx); } +static Type f32Ty(MLIRContext *ctx) { return FloatType::getF32(ctx); } +static Type f64Ty(MLIRContext *ctx) { return FloatType::getF64(ctx); } +static Type bf16Ty(MLIRContext *ctx) { return FloatType::getBF16(ctx); } + +static bool isFloat(Type type) { + return type.isF32() || type.isF64() || type.isF16() || type.isF128(); +} + +static bool isInt(Type type) { return type.isIntOrFloat() && !isFloat(type); } + +} // namespace type +} // namespace triton +} // namespace mlir + +#endif // TRITON_CONVERSION_MLIR_TYPES_H_ diff --git a/include/triton/Conversion/Passes.h b/include/triton/Conversion/Passes.h new file mode 100644 index 000000000000..42e6cb646975 --- /dev/null +++ b/include/triton/Conversion/Passes.h @@ -0,0 +1,17 @@ +#ifndef TRITON_CONVERSION_PASSES_H +#define TRITON_CONVERSION_PASSES_H + +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "triton/Conversion/TritonGPUToLLVM/TritonGPUToLLVMPass.h" +#include "triton/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.h" + +namespace mlir { +namespace triton { + +#define GEN_PASS_REGISTRATION +#include "triton/Conversion/Passes.h.inc" + +} // namespace triton +} // namespace mlir + +#endif diff --git a/include/triton/Conversion/Passes.td b/include/triton/Conversion/Passes.td new file mode 100644 index 000000000000..70bb20b78e68 --- /dev/null +++ b/include/triton/Conversion/Passes.td @@ -0,0 +1,54 @@ +#ifndef TRITON_CONVERSION_PASSES +#define TRITON_CONVERSION_PASSES + +include "mlir/Pass/PassBase.td" + +def ConvertTritonToTritonGPU: Pass<"convert-triton-to-tritongpu", "mlir::ModuleOp"> { + let summary = "Convert Triton to TritonGPU"; + let description = [{ + + }]; + let constructor = "mlir::triton::createConvertTritonToTritonGPUPass()"; + + let dependentDialects = ["mlir::arith::ArithmeticDialect", + "mlir::math::MathDialect", + "mlir::StandardOpsDialect", + // TODO: Does this pass depend on SCF? 
+ "mlir::scf::SCFDialect", + "mlir::triton::TritonDialect", + "mlir::triton::gpu::TritonGPUDialect"]; + + let options = [ + Option<"numWarps", "num-warps", + "int32_t", /*default*/"4", + "number of warps"> + ]; +} + + +def ConvertTritonGPUToLLVM : Pass<"convert-triton-gpu-to-llvm", "mlir::ModuleOp"> { + let summary = "Convert TritonGPU to LLVM"; + let description = [{ + + }]; + let constructor = "mlir::triton::createConvertTritonGPUToLLVMPass()"; + + let dependentDialects = ["mlir::arith::ArithmeticDialect", + "mlir::math::MathDialect", + "mlir::gpu::GPUDialect", + "mlir::scf::SCFDialect", + "mlir::LLVM::LLVMDialect", + "mlir::tensor::TensorDialect", + "mlir::triton::TritonDialect", + "mlir::triton::gpu::TritonGPUDialect", + "mlir::NVVM::NVVMDialect", + "mlir::StandardOpsDialect"]; + + let options = [ + Option<"computeCapability", "compute-capability", + "int32_t", /*default*/"80", + "device compute capability"> + ]; +} + +#endif diff --git a/include/triton/Conversion/TritonGPUToLLVM/PTXAsmFormat.h b/include/triton/Conversion/TritonGPUToLLVM/PTXAsmFormat.h new file mode 100644 index 000000000000..8d016d117aa7 --- /dev/null +++ b/include/triton/Conversion/TritonGPUToLLVM/PTXAsmFormat.h @@ -0,0 +1,326 @@ +#ifndef TRITON_CONVERSION_TRITONGPU_TO_LLVM_ASM_FORMAT_H +#define TRITON_CONVERSION_TRITONGPU_TO_LLVM_ASM_FORMAT_H + +#include "mlir/IR/Value.h" +#include "triton/Dialect/Triton/IR/Dialect.h" +#include "llvm/ADT/SmallVector.h" +#include "llvm/ADT/StringRef.h" +#include +#include + +namespace mlir { +class ConversionPatternRewriter; +class Location; + +namespace triton { +using llvm::StringRef; + +struct PTXInstr; +struct PTXInstrCommon; +struct PTXInstrExecution; + +// PTXBuilder helps to manage a PTX asm program consists of one or multiple +// instructions. +// +// A helper for building an ASM program, the objective of PTXBuilder is to give +// a thin encapsulation and make the ASM code for MLIR LLVM Dialect more clear. +// Currently, several factors are introduced to reduce the need for mixing +// string and C++ if-else code. +// +// Usage: +// To build: @$3 asm("@%3 add.s32 %0, %1, %2;" : "=r"(i) : "r"(j), "r"(k), +// "b"(p)); +// +// PTXBuilder builder; +// auto& add = builder.create<>(); +// add.predicate(pVal).o("lo").o("u32"); // add any suffix +// // predicate here binds %0 to pVal, pVal is a mlir::Value +// +// auto* iOpr = builder.newOperand(iVal, "r"); // %1 bind to iVal +// auto* jOpr = builder.newOperand(jVal, "r"); // %2 bind to jVal +// auto* kOpr = builder.newOperand(kVal, "r"); // %3 bind to kVal +// add(iOpr, jOpr, kOpr).predicate(predVal); // set operands and predicate +// +// To get the asm code: +// builder.dump() +// +// To get all the mlir::Value used in the PTX code, +// +// builder.getAllMlirArgs() // get {pVal, iVal, jVal, kVal} +// +// To get the string containing all the constraints with "," separated, +// builder.getConstraints() // get "=r,r,k" +// +// PTXBuilder can build a PTX asm with multiple instructions, sample code: +// +// PTXBuilder builder; +// auto& mov = builder.create("mov"); +// auto& cp = builder.create("cp"); +// mov(...); +// cp(...); +// This will get a PTX code with two instructions. +// +// Similar to a C function, a declared PTXInstr instance can be launched +// multiple times with different operands, e.g. +// +// auto& mov = builder.create("mov"); +// mov(... some operands ...); +// mov(... some different operands ...); +// +// Finally, we will get a PTX code with two mov instructions. 
+// +// There are several derived instruction type for typical instructions, for +// example, the PtxIOInstr for ld and st instructions. +struct PTXBuilder { + struct Operand { + std::string constraint; + Value value; + int idx{-1}; + llvm::SmallVector list; + std::function repr; + + // for list + Operand() = default; + Operand(const Operation &) = delete; + Operand(Value value, StringRef constraint) + : constraint(constraint), value(value) {} + + bool isList() const { return !value && constraint.empty(); } + + Operand *listAppend(Operand *arg) { + list.push_back(arg); + return this; + } + + Operand *listGet(size_t nth) const { + assert(nth < list.size()); + return list[nth]; + } + + std::string dump() const; + }; + + template + INSTR *create(Args &&...args) { + instrs.emplace_back(std::make_unique(this, args...)); + return static_cast(instrs.back().get()); + } + + // Create a list of operands. + Operand *newListOperand() { return newOperand(); } + + Operand *newListOperand(ArrayRef> items) { + auto *list = newOperand(); + for (auto &item : items) { + list->listAppend(newOperand(item.first, item.second)); + } + return list; + } + + Operand *newListOperand(unsigned count, mlir::Value val, + const std::string &constraint) { + auto *list = newOperand(); + for (unsigned i = 0; i < count; ++i) { + list->listAppend(newOperand(val, constraint)); + } + return list; + } + + Operand *newListOperand(unsigned count, const std::string &constraint) { + auto *list = newOperand(); + for (unsigned i = 0; i < count; ++i) { + list->listAppend(newOperand(constraint)); + } + return list; + } + + // Create a new operand. It will not add to operand list. + // @value: the MLIR value bind to this operand. + // @constraint: ASM operand constraint, .e.g. "=r" + // @formatter: extra format to represent this operand in ASM code, default is + // "%{0}".format(operand.idx). + Operand *newOperand(mlir::Value value, StringRef constraint, + std::function formatter = nullptr); + + // Create a new operand which is written to, that is, the constraint starts + // with "=", e.g. "=r". + Operand *newOperand(StringRef constraint); + + // Create a constant integer operand. + Operand *newConstantOperand(int64_t v); + // Create a constant operand with explicit code specified. + Operand *newConstantOperand(const std::string &v); + + Operand *newAddrOperand(mlir::Value addr, StringRef constraint, int off = 0); + + llvm::SmallVector getAllArgs() const; + + llvm::SmallVector getAllMLIRArgs() const; + + std::string getConstraints() const; + + std::string dump() const; + + mlir::Value launch(ConversionPatternRewriter &rewriter, Location loc, + Type resTy, bool hasSideEffect = true, + bool isAlignStack = false, + ArrayRef attrs = {}) const; + +private: + Operand *newOperand() { + argArchive.emplace_back(std::make_unique()); + return argArchive.back().get(); + } + + // Make the operands in argArchive follow the provided \param order. + void reorderArgArchive(ArrayRef order) { + assert(order.size() == argArchive.size()); + // The order in argArchive is unnecessary when onlyAttachMLIRArgs=false, but + // it does necessary when onlyAttachMLIRArgs is true for the $0, $1... are + // determined by PTX code snippet passed from external. 
+ sort(argArchive.begin(), argArchive.end(), + [&](std::unique_ptr &a, std::unique_ptr &b) { + auto ida = std::find(order.begin(), order.end(), a.get()); + auto idb = std::find(order.begin(), order.end(), b.get()); + assert(ida != order.end()); + assert(idb != order.end()); + return ida < idb; + }); + } + + friend struct PTXInstr; + friend struct PTXInstrCommon; + +protected: + llvm::SmallVector, 6> argArchive; + llvm::SmallVector, 2> instrs; + llvm::SmallVector, 4> executions; + int oprCounter{}; +}; + +// PTX instruction common interface. +// Put the generic logic for all the instructions here. +struct PTXInstrCommon { + explicit PTXInstrCommon(PTXBuilder *builder) : builder(builder) {} + + using Operand = PTXBuilder::Operand; + + // clang-format off + PTXInstrExecution& operator()() { return call({}); } + PTXInstrExecution& operator()(Operand* a) { return call({a}); } + PTXInstrExecution& operator()(Operand* a, Operand* b) { return call({a, b}); } + PTXInstrExecution& operator()(Operand* a, Operand* b, Operand* c) { return call({a, b, c}); } + PTXInstrExecution& operator()(Operand* a, Operand* b, Operand* c, Operand* d) { return call({a, b, c, d}); } + PTXInstrExecution& operator()(Operand* a, Operand* b, Operand* c, Operand* d, Operand * e) { return call({a, b, c, d, e}); } + PTXInstrExecution& operator()(Operand* a, Operand* b, Operand* c, Operand* d, Operand * e, Operand* f) { return call({a, b, c, d, e, f}); } + PTXInstrExecution& operator()(Operand* a, Operand* b, Operand* c, Operand* d, Operand * e, Operand* f, Operand* g) { return call({a, b, c, d, e, f, g}); } + // clang-format on + + // Set operands of this instruction. + PTXInstrExecution &operator()(llvm::ArrayRef oprs, + bool onlyAttachMLIRArgs = false); + +protected: + // "Call" the instruction with operands. + // \param oprs The operands of this instruction. + // \param onlyAttachMLIRArgs Indicate that it simply attach the MLIR Arguments + // to the inline Asm without generating the operand ids(such as $0, $1) in PTX + // code. + PTXInstrExecution &call(llvm::ArrayRef oprs, + bool onlyAttachMLIRArgs = false); + + PTXBuilder *builder{}; + llvm::SmallVector instrParts; + + friend struct PTXInstrExecution; +}; + +template struct PTXInstrBase : public PTXInstrCommon { + using Operand = PTXBuilder::Operand; + + explicit PTXInstrBase(PTXBuilder *builder, const std::string &name) + : PTXInstrCommon(builder) { + o(name); + } + + // Append a suffix to the instruction. + // e.g. PTXInstr("add").o("s32") get a add.s32. + // A predicate is used to tell whether to apply the suffix, so that no if-else + // code needed. e.g. `PTXInstr("add").o("s32", isS32).o("u32", !isS32);` will + // get a `add.s32` if isS32 is true. + ConcreteT &o(const std::string &suffix, bool predicate = true) { + if (predicate) + instrParts.push_back(suffix); + return *static_cast(this); + } +}; + +struct PTXInstr : public PTXInstrBase { + using PTXInstrBase::PTXInstrBase; + + // Append a ".global" to the instruction. + PTXInstr &global(); + + // Append a ".shared" to the instruction. + PTXInstr &shared(); + + // Append a ".v[0-9]+" to the instruction + PTXInstr &v(int vecWidth, bool predicate = true); + + // Append a".b[0-9]+" to the instruction + PTXInstr &b(int width); +}; + +// Record the operands and context for "launching" a PtxInstr. 
+struct PTXInstrExecution { + using Operand = PTXBuilder::Operand; + + llvm::SmallVector argsInOrder; + + PTXInstrExecution() = default; + explicit PTXInstrExecution(PTXInstrCommon *instr, + llvm::ArrayRef oprs, + bool onlyAttachMLIRArgs) + : argsInOrder(oprs.begin(), oprs.end()), instr(instr), + onlyAttachMLIRArgs(onlyAttachMLIRArgs) {} + + // Prefix a predicate to the instruction. + PTXInstrExecution &predicate(mlir::Value value, StringRef constraint = "b") { + pred = instr->builder->newOperand(value, constraint); + return *this; + } + + // Prefix a !predicate to the instruction. + PTXInstrExecution &predicateNot(mlir::Value value, StringRef constraint) { + pred = instr->builder->newOperand(value, constraint); + pred->repr = [](int idx) { return "@!$" + std::to_string(idx); }; + return *this; + } + + std::string dump() const; + + SmallVector getArgList() const; + + PTXInstrCommon *instr{}; + Operand *pred{}; + bool onlyAttachMLIRArgs{}; +}; + +/// ====== Some instruction wrappers ====== +// We add the wrappers to make the usage more intuitive by avoiding mixing the +// PTX code with some trivial C++ code. + +struct PTXCpAsyncLoadInstr : PTXInstrBase { + explicit PTXCpAsyncLoadInstr(PTXBuilder *builder, + triton::CacheModifier modifier) + : PTXInstrBase(builder, "cp.async") { + o(triton::stringifyCacheModifier(modifier).str()); + o("shared"); + o("global"); + } +}; + +} // namespace triton +} // namespace mlir + +#endif diff --git a/include/triton/Conversion/TritonGPUToLLVM/TritonGPUToLLVMPass.h b/include/triton/Conversion/TritonGPUToLLVM/TritonGPUToLLVMPass.h new file mode 100644 index 000000000000..2eebe2eb613a --- /dev/null +++ b/include/triton/Conversion/TritonGPUToLLVM/TritonGPUToLLVMPass.h @@ -0,0 +1,22 @@ +#ifndef TRITON_CONVERSION_TRITONGPU_TO_LLVM_PASS_H +#define TRITON_CONVERSION_TRITONGPU_TO_LLVM_PASS_H + +#include "mlir/Conversion/LLVMCommon/TypeConverter.h" +#include "mlir/Transforms/DialectConversion.h" +#include + +namespace mlir { + +class ModuleOp; +template class OperationPass; + +namespace triton { + +std::unique_ptr> +createConvertTritonGPUToLLVMPass(int computeCapability = 80); + +} // namespace triton + +} // namespace mlir + +#endif diff --git a/include/triton/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.h b/include/triton/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.h new file mode 100644 index 000000000000..2486cdad5038 --- /dev/null +++ b/include/triton/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.h @@ -0,0 +1,25 @@ +#ifndef TRITON_CONVERSION_TRITONTOTRITONGPU_TRITONTOTRITONGPUPASS_H +#define TRITON_CONVERSION_TRITONTOTRITONGPU_TRITONTOTRITONGPUPASS_H + +#include + +namespace mlir { + +class ModuleOp; +template class OperationPass; + +namespace triton { + +constexpr static char AttrNumWarpsName[] = "triton_gpu.num-warps"; + +// Create the pass with numWarps passed from cl::opt. +std::unique_ptr> createConvertTritonToTritonGPUPass(); + +// Create the pass with numWarps set explicitly. 
+std::unique_ptr> +createConvertTritonToTritonGPUPass(int numWarps); + +} // namespace triton +} // namespace mlir + +#endif diff --git a/include/triton/Dialect/CMakeLists.txt b/include/triton/Dialect/CMakeLists.txt new file mode 100644 index 000000000000..27cb65ce5101 --- /dev/null +++ b/include/triton/Dialect/CMakeLists.txt @@ -0,0 +1,2 @@ +add_subdirectory(Triton) +add_subdirectory(TritonGPU) diff --git a/include/triton/Dialect/Triton/CMakeLists.txt b/include/triton/Dialect/Triton/CMakeLists.txt new file mode 100644 index 000000000000..9f57627c321f --- /dev/null +++ b/include/triton/Dialect/Triton/CMakeLists.txt @@ -0,0 +1,2 @@ +add_subdirectory(IR) +add_subdirectory(Transforms) diff --git a/include/triton/Dialect/Triton/IR/CMakeLists.txt b/include/triton/Dialect/Triton/IR/CMakeLists.txt new file mode 100644 index 000000000000..81af1dff103b --- /dev/null +++ b/include/triton/Dialect/Triton/IR/CMakeLists.txt @@ -0,0 +1,19 @@ +set(LLVM_TARGET_DEFINITIONS TritonOps.td) +mlir_tablegen(Ops.h.inc -gen-op-decls) +mlir_tablegen(Ops.cpp.inc -gen-op-defs) +mlir_tablegen(OpsEnums.h.inc -gen-enum-decls) +mlir_tablegen(OpsEnums.cpp.inc -gen-enum-defs) + +set(LLVM_TARGET_DEFINITIONS TritonDialect.td) +mlir_tablegen(Dialect.h.inc -gen-dialect-decls) +mlir_tablegen(Dialect.cpp.inc -gen-dialect-defs) + +set(LLVM_TARGET_DEFINITIONS TritonOps.td) +mlir_tablegen(Types.h.inc -gen-typedef-decls) +mlir_tablegen(Types.cpp.inc -gen-typedef-defs) + +set(LLVM_TARGET_DEFINITIONS TritonInterfaces.td) +mlir_tablegen(AttrInterfaces.h.inc -gen-attr-interface-decls) +mlir_tablegen(AttrInterfaces.cpp.inc -gen-attr-interface-defs) + +add_public_tablegen_target(TritonTableGen) diff --git a/include/triton/Dialect/Triton/IR/Dialect.h b/include/triton/Dialect/Triton/IR/Dialect.h new file mode 100644 index 000000000000..fb4a646079dc --- /dev/null +++ b/include/triton/Dialect/Triton/IR/Dialect.h @@ -0,0 +1,48 @@ +#ifndef TRITON_DIALECT_TRITON_IR_DIALECT_H_ +#define TRITON_DIALECT_TRITON_IR_DIALECT_H_ + +#include "mlir/Dialect/Math/IR/Math.h" +#include "mlir/Dialect/SCF/SCF.h" +#include "mlir/Dialect/StandardOps/IR/Ops.h" +#include "mlir/Dialect/Tensor/IR/Tensor.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/Dialect.h" +#include "mlir/Interfaces/ControlFlowInterfaces.h" + +#include "triton/Dialect/Triton/IR/Dialect.h.inc" +#include "triton/Dialect/Triton/IR/OpsEnums.h.inc" +#include "triton/Dialect/Triton/IR/Traits.h" +#include "triton/Dialect/Triton/IR/Types.h" + +#define GET_OP_CLASSES +#include "triton/Dialect/Triton/IR/Ops.h.inc" + +namespace mlir { +namespace triton { + +class DialectInferLayoutInterface + : public DialectInterface::Base { +public: + DialectInferLayoutInterface(Dialect *dialect) : Base(dialect) {} + + virtual LogicalResult + inferReduceOpEncoding(Attribute operandEncoding, unsigned axis, + Attribute &resultEncoding) const = 0; + + virtual LogicalResult + inferExpandDimsOpEncoding(Attribute operandEncoding, unsigned axis, + Attribute &resultEncoding, + Optional location) const = 0; + + // Note: this function only verify operand encoding but doesn't infer result + // encoding + virtual LogicalResult + inferDotOpEncoding(Attribute operandEncoding, unsigned opIdx, + Attribute retEncoding, + Optional location) const = 0; +}; + +} // namespace triton +} // namespace mlir + +#endif // TRITON_IR_DIALECT_H_ diff --git a/include/triton/Dialect/Triton/IR/Interfaces.h b/include/triton/Dialect/Triton/IR/Interfaces.h new file mode 100644 index 000000000000..f8f3a6f74ccb --- /dev/null +++ 
b/include/triton/Dialect/Triton/IR/Interfaces.h @@ -0,0 +1,9 @@ +#ifndef TRITON_IR_INTERFACES_H_ +#define TRITON_IR_INTERFACES_H_ + +#include "mlir/IR/OpDefinition.h" + +#define GET_TYPEDEF_CLASSES +#include "triton/Dialect/Triton/IR/AttrInterfaces.h.inc" + +#endif // TRITON_IR_TYPES_H_ diff --git a/include/triton/Dialect/Triton/IR/Traits.h b/include/triton/Dialect/Triton/IR/Traits.h new file mode 100644 index 000000000000..e83a8e3b908f --- /dev/null +++ b/include/triton/Dialect/Triton/IR/Traits.h @@ -0,0 +1,60 @@ +#ifndef TRITON_IR_TRAITS_H_ +#define TRITON_IR_TRAITS_H_ + +#include "mlir/IR/OpDefinition.h" + +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/Support/LogicalResult.h" + +#include + +namespace mlir { +namespace OpTrait { + +// These functions are out-of-line implementations of the methods in the +// corresponding trait classes. This avoids them being template +// instantiated/duplicated. +namespace impl { +LogicalResult verifySameOperandsAndResultEncoding(Operation *op); +LogicalResult verifySameOperandsEncoding(Operation *op); +// The rationale for this trait is to prevent users from creating programs +// that would have catastrophic register pressure and cause the compiler to +// hang. +// Since H100 has 256KB registers, we should allow users to create tensors +// of size up to 256K elements. It will spill for datatypes wider than 1B, +// but we probably should limit number of elements (rather than bytes) to +// keep specs simple +int constexpr maxTensorNumElements = 1048576; +LogicalResult verifyTensorSize(Operation *op); +} // namespace impl + +template +class TensorSizeTrait : public TraitBase { +public: + static LogicalResult verifyTrait(Operation *op) { + return impl::verifyTensorSize(op); + } +}; + +template +class SameOperandsAndResultEncoding + : public TraitBase { +public: + static LogicalResult verifyTrait(Operation *op) { + return impl::verifySameOperandsAndResultEncoding(op); + } +}; + +template +class SameOperandsEncoding + : public TraitBase { +public: + static LogicalResult verifyTrait(Operation *op) { + return impl::verifySameOperandsEncoding(op); + } +}; + +} // namespace OpTrait +} // namespace mlir + +#endif diff --git a/include/triton/Dialect/Triton/IR/TritonAttrDefs.td b/include/triton/Dialect/Triton/IR/TritonAttrDefs.td new file mode 100644 index 000000000000..bb0f4c0676dc --- /dev/null +++ b/include/triton/Dialect/Triton/IR/TritonAttrDefs.td @@ -0,0 +1,68 @@ +#ifndef TRITON_ATTR_DEFS +#define TRITON_ATTR_DEFS + +include "mlir/IR/EnumAttr.td" + +// Attrs for LoadOp +def TT_CacheModifierAttr : I32EnumAttr< + "CacheModifier", "", + [ + I32EnumAttrCase<"NONE", 1, "none">, + I32EnumAttrCase<"CA", 2, "ca">, + I32EnumAttrCase<"CG", 3, "cg">, + ]> { + let cppNamespace = "::mlir::triton"; +} +def TT_EvictionPolicyAttr : I32EnumAttr< + "EvictionPolicy", "", + [ + I32EnumAttrCase<"NORMAL", 1, "evict_normal">, + I32EnumAttrCase<"EVICT_FIRST", 2, "evict_first">, + I32EnumAttrCase<"EVICT_LAST", 3, "evict_last"> + ]> { + let cppNamespace = "::mlir::triton"; +} + +// reduction +def TT_RedOpAttr : I32EnumAttr< + /*name*/"RedOp", /*summary*/"", + /*case*/ + [ + I32EnumAttrCase, + I32EnumAttrCase<"FADD", 2, "fadd">, + I32EnumAttrCase<"MIN", 3, "min">, + I32EnumAttrCase<"MAX", 4, "max">, + I32EnumAttrCase<"UMIN", 5, "umin">, + I32EnumAttrCase<"UMAX", 6, "umax">, + I32EnumAttrCase<"ARGMIN", 7, "argmin">, + I32EnumAttrCase<"ARGMAX", 8, "argmax">, + I32EnumAttrCase<"ARGUMIN", 9, "argumin">, + I32EnumAttrCase<"ARGUMAX", 10, "argumax">, + I32EnumAttrCase<"FMIN", 11, "fmin">, 
+ I32EnumAttrCase<"FMAX", 12, "fmax">, + I32EnumAttrCase<"ARGFMIN", 13, "argfmin">, + I32EnumAttrCase<"ARGFMAX", 14, "argfmax">, + I32EnumAttrCase<"XOR", 15, "xor"> + ]> { + let cppNamespace = "::mlir::triton"; +} + +// atomic +def TT_AtomicRMWAttr : I32EnumAttr< + "RMWOp", "", + [ + I32EnumAttrCase<"AND", 1, "and">, + I32EnumAttrCase<"OR", 2, "or">, + I32EnumAttrCase<"XOR", 3, "xor">, + I32EnumAttrCase<"ADD", 4, "add">, + I32EnumAttrCase<"FADD", 5, "fadd">, + I32EnumAttrCase<"MAX", 6, "max">, + I32EnumAttrCase<"MIN", 7, "min">, + I32EnumAttrCase<"UMAX", 8, "umax">, + I32EnumAttrCase<"UMIN", 9, "umin">, + I32EnumAttrCase<"XCHG", 10, "exch"> + ]> { + let cppNamespace = "::mlir::triton"; +} + +#endif diff --git a/include/triton/Dialect/Triton/IR/TritonDialect.td b/include/triton/Dialect/Triton/IR/TritonDialect.td new file mode 100644 index 000000000000..07b069e14fb8 --- /dev/null +++ b/include/triton/Dialect/Triton/IR/TritonDialect.td @@ -0,0 +1,46 @@ +#ifndef TRITON_DIALECT +#define TRITON_DIALECT + +include "mlir/IR/OpBase.td" + +def Triton_Dialect : Dialect { + let name = "tt"; + + let cppNamespace = "::mlir::triton"; + + let summary = "The Triton IR in MLIR"; + + let description = [{ + Triton Dialect. + + Dependent Dialects: + * Arithmetic: + * addf, addi, andi, cmpf, cmpi, divf, fptosi, ... + * Math: + * exp, sin, cos, log, ... + * StructuredControlFlow: + * ForOp, IfOp, WhileOp, YieldOp, ConditionOp + }]; + + let dependentDialects = [ + "arith::ArithmeticDialect", + "math::MathDialect", + "StandardOpsDialect", + "scf::SCFDialect", + + // Since LLVM 15 + // "cf::ControlFlowDialect", + // "func::FuncDialect" + ]; + + let extraClassDeclaration = [{ + void registerTypes(); + }]; + + let hasConstantMaterializer = 1; +} + +include "triton/Dialect/Triton/IR/TritonTypes.td" + + +#endif // TRITON_DIALECT diff --git a/include/triton/Dialect/Triton/IR/TritonInterfaces.td b/include/triton/Dialect/Triton/IR/TritonInterfaces.td new file mode 100644 index 000000000000..2c4014cab061 --- /dev/null +++ b/include/triton/Dialect/Triton/IR/TritonInterfaces.td @@ -0,0 +1,11 @@ +#ifndef TRITON_INTERFACES +#define TRITON_INTERFACES + +include "mlir/IR/OpBase.td" + +def TensorSizeTrait : NativeOpTrait<"TensorSizeTrait">; +def SameOperandsAndResultEncoding : NativeOpTrait<"SameOperandsAndResultEncoding">; +def SameOperandsEncoding : NativeOpTrait<"SameOperandsEncoding">; + + +#endif // TRITON_INTERFACES \ No newline at end of file diff --git a/include/triton/Dialect/Triton/IR/TritonOps.td b/include/triton/Dialect/Triton/IR/TritonOps.td new file mode 100644 index 000000000000..1a7a982ac3b8 --- /dev/null +++ b/include/triton/Dialect/Triton/IR/TritonOps.td @@ -0,0 +1,423 @@ +#ifndef TRITON_OPS +#define TRITON_OPS + +include "triton/Dialect/Triton/IR/TritonDialect.td" +include "triton/Dialect/Triton/IR/TritonTypes.td" +include "triton/Dialect/Triton/IR/TritonAttrDefs.td" +include "triton/Dialect/Triton/IR/TritonInterfaces.td" +include "mlir/IR/OpBase.td" +include "mlir/Interfaces/SideEffectInterfaces.td" // NoSideEffect +include "mlir/Interfaces/ControlFlowInterfaces.td" // BranchOpInterface +include "mlir/Interfaces/InferTypeOpInterface.td" // SameOperandsAndResultType +include "mlir/Interfaces/SideEffectInterfaces.td" // NoSideEffect +include "mlir/Interfaces/CastInterfaces.td" // CastOpInterface + +// +// Op Base +// +class TT_Op traits = []> : + Op { +} + +// +// CastOps +// +// Use cast ops in arith: +// bitcast +// fptoui, fptosi, uitofp, sitofp, +// extf, tructf, +// extui, extsi, tructi +def TT_IntToPtrOp : 
TT_Op<"int_to_ptr", [SameOperandsAndResultShape, + SameOperandsAndResultEncoding, + NoSideEffect, + /*DeclareOpInterfaceMethods*/]> { + let summary = "Cast int64 to pointer"; + + let arguments = (ins TT_I64Like:$from); + + let results = (outs TT_PtrLike:$result); + + let assemblyFormat = "$from attr-dict `:` type($from) `->` type($result)"; +} + +def TT_PtrToIntOp : TT_Op<"ptr_to_int", [SameOperandsAndResultShape, + SameOperandsAndResultEncoding, + NoSideEffect, + /*DeclareOpInterfaceMethods*/]> { + let summary = "Cast pointer to int64"; + + let arguments = (ins TT_PtrLike:$from); + + let results = (outs TT_I64Like:$result); + + let assemblyFormat = "$from attr-dict `:` type($from) `->` type($result)"; +} + +// arith.bitcast doesn't support pointers +def TT_BitcastOp : TT_Op<"bitcast", [SameOperandsAndResultShape, + SameOperandsAndResultEncoding, + NoSideEffect, + /*DeclareOpInterfaceMethods*/]> { + let summary = "Cast between types of the same bitwidth"; + + let arguments = (ins TT_Type:$from); + + let results = (outs TT_Type:$result); + + let assemblyFormat = "$from attr-dict `:` type($from) `->` type($result)"; + + // TODO: Add verifier +} + +def TT_FpToFpOp : TT_Op<"fp_to_fp", [SameOperandsAndResultShape, + SameOperandsAndResultEncoding, + NoSideEffect, + DeclareOpInterfaceMethods]> { + let summary = "Floating point casting for custom types"; + + let description = [{ + Floating point casting for custom types (F8). + + F8 <-> FP16, BF16, FP32, FP64 + }]; + + let arguments = (ins TT_FloatLike:$from); + + let results = (outs TT_FloatLike:$result); + + let assemblyFormat = "$from attr-dict `:` type($from) `->` type($result)"; + + // TODO: We need a verifier here. +} + +// +// Pointer Arith Ops +// + +def TT_AddPtrOp : TT_Op<"addptr", + [NoSideEffect, + SameOperandsAndResultShape, + SameOperandsAndResultEncoding, + TypesMatchWith<"result type matches ptr type", + "result", "ptr", "$_self">]> { + let arguments = (ins TT_PtrLike:$ptr, TT_IntLike:$offset); + + let results = (outs TT_PtrLike:$result); + + let assemblyFormat = "$ptr `,` $offset attr-dict `:` type($result) `,` type($offset)"; +} + + +// +// Load/Store Ops +// +def TT_LoadOp : TT_Op<"load", + [SameOperandsAndResultShape, + SameOperandsAndResultEncoding, + AttrSizedOperandSegments, + MemoryEffects<[MemRead]>, + TypesMatchWith<"infer ptr type from result type", + "result", "ptr", "getPointerTypeSameShape($_self)">, + TypesMatchWith<"infer mask type from result type or none", + "result", "mask", "getI1SameShape($_self)", + "($_op.getOperands().size() <= 1) || std::equal_to<>()">, + TypesMatchWith<"infer other type from result type or none", + "result", "other", "$_self", + "($_op.getOperands().size() <= 2) || std::equal_to<>()">]> { + let summary = "load"; + + let arguments = (ins TT_PtrLike:$ptr, Optional:$mask, Optional:$other, + TT_CacheModifierAttr:$cache, TT_EvictionPolicyAttr:$evict, + BoolAttr:$isVolatile); + + let results = (outs TT_Type:$result); + + let builders = [ + OpBuilder<(ins "Value":$ptr, "triton::CacheModifier":$cache, + "triton::EvictionPolicy":$evict, "bool":$isVolatile)>, + OpBuilder<(ins "Value":$ptr, "Value":$mask, "triton::CacheModifier":$cache, + "triton::EvictionPolicy":$evict, "bool":$isVolatile)>, + OpBuilder<(ins "Value":$ptr, "Value":$mask, "Value":$other, "triton::CacheModifier":$cache, + "triton::EvictionPolicy":$evict, "bool":$isVolatile)>, + ]; + + // let assemblyFormat = "operands attr-dict `:` type($result)"; + let parser = [{ return mlir::triton::parseLoadOp(parser, result); }]; + + let printer = 
[{ return mlir::triton::printLoadOp(p, *this); }]; + + let hasCanonicalizer = 1; +} + +def TT_StoreOp : TT_Op<"store", + [SameOperandsShape, + SameOperandsEncoding, + MemoryEffects<[MemWrite]>, + TypesMatchWith<"infer ptr type from value type", + "value", "ptr", + "getPointerTypeSameShape($_self)">, + TypesMatchWith<"infer mask type from value type", + "value", "mask", "getI1SameShape($_self)", + "($_op.getOperands().size() <= 2) || std::equal_to<>()">]> { + let summary = "store"; + + let arguments = (ins TT_PtrLike:$ptr, TT_Type:$value, Optional:$mask); + + let builders = [ + OpBuilder<(ins "Value":$ptr, "Value":$value)>, + ]; + + // let assemblyFormat = "operands attr-dict `:` type($value)"; + let parser = [{ return mlir::triton::parseStoreOp(parser, result); }]; + + let printer = [{ return mlir::triton::printStoreOp(p, *this); }]; + + let hasCanonicalizer = 1; +} + +// +// Atomic Op +// +def TT_AtomicRMWOp : TT_Op<"atomic_rmw", [SameOperandsAndResultShape, + SameOperandsAndResultEncoding, + MemoryEffects<[MemRead]>, + MemoryEffects<[MemWrite]>, + TypesMatchWith<"infer ptr type from value type", + "val", "ptr", + "getPointerTypeSameShape($_self)">, + TypesMatchWith<"infer mask type from value type", + "val", "mask", "getI1SameShape($_self)", + "($_op.getOperands().size() <= 2) || std::equal_to<>()">]> { + let summary = "atomic rmw"; + + let description = [{ + load data at $ptr, do $rmw_op with $val, and store result to $ptr. + + return old value at $ptr + }]; + + let arguments = (ins TT_AtomicRMWAttr:$atomic_rmw_op, TT_PtrLike:$ptr, + TT_Type:$val, Optional:$mask); + + let results = (outs TT_Type:$result); +} + +def TT_AtomicCASOp : TT_Op<"atomic_cas", [MemoryEffects<[MemRead]>, + MemoryEffects<[MemWrite]>, + SameOperandsAndResultShape, + SameOperandsAndResultEncoding]> { + let summary = "atomic cas"; + + let description = [{ + compare $cmp with data $old at location $ptr, + + if $old == $cmp, store $val to $ptr, + + else store $old to $ptr, + + return $old + }]; + + let arguments = (ins TT_Ptr:$ptr, TT_Type:$cmp, TT_Type:$val); + + let results = (outs TT_Type:$result); +} + + +// +// Shape Manipulation Ops +// +def TT_SplatOp : TT_Op<"splat", [NoSideEffect, + SameOperandsAndResultElementType]> { + let summary = "splat"; + + let arguments = (ins TT_Type:$src); + + let results = (outs TT_Tensor:$result); + + let assemblyFormat = "$src attr-dict `:` functional-type(operands, results)"; + + let hasFolder = 1; +} + +def TT_ExpandDimsOp : TT_Op<"expand_dims", [NoSideEffect, + DeclareOpInterfaceMethods, + SameOperandsAndResultElementType]> { + let summary = "expand_dims"; + + let arguments = (ins TT_Tensor:$src, I32Attr:$axis); + + let results = (outs TT_Tensor:$result); + + let assemblyFormat = "$src attr-dict `:` functional-type(operands, results)"; +} + +def TT_ViewOp : TT_Op<"view", [NoSideEffect, + SameOperandsAndResultElementType]> { + let summary = "view"; + + let arguments = (ins TT_Tensor:$src); + + let results = (outs TT_Tensor:$result); + + let assemblyFormat = "$src attr-dict `:` functional-type(operands, results)"; + +} + +def TT_BroadcastOp : TT_Op<"broadcast", [NoSideEffect, + SameOperandsAndResultElementType]> { + let summary = "broadcast. 
No left-padding as of now."; + + let arguments = (ins TT_Type:$src); + + let results = (outs TT_Type:$result); + + let assemblyFormat = "$src attr-dict `:` functional-type(operands, results)"; + + let hasFolder = 1; +} + +def TT_CatOp : TT_Op<"cat", [NoSideEffect, + SameOperandsAndResultElementType]> { + let summary = "concatenate 2 tensors"; + + let arguments = (ins TT_Tensor:$lhs, TT_Tensor:$rhs); + + let results = (outs TT_Tensor:$result); + + let assemblyFormat = "$lhs `,` $rhs attr-dict `:` functional-type(operands, results)"; +} + +def TT_TransOp : TT_Op<"trans", [NoSideEffect, + SameOperandsAndResultElementType]> { + + let summary = "transpose a tensor"; + + let arguments = (ins TT_Tensor:$src); + + let results = (outs TT_Tensor:$result); + + let assemblyFormat = "$src attr-dict `:` functional-type(operands, results)"; +} + +// +// SPMD Ops +// +def TT_GetProgramIdOp : TT_Op<"get_program_id", [NoSideEffect]> { + let arguments = (ins I32Attr:$axis); + + let results = (outs I32:$result); + + let assemblyFormat = "attr-dict `:` type($result)"; +} + +def TT_GetNumProgramsOp : TT_Op<"get_num_programs", [NoSideEffect]> { + let arguments = (ins I32Attr:$axis); + + let results = (outs I32:$result); + + let assemblyFormat = "attr-dict `:` type($result)"; +} + +// +// Dot Op +// +def TT_DotOp : TT_Op<"dot", [NoSideEffect, + DeclareOpInterfaceMethods, + TypesMatchWith<"result's type matches accumulator's type", + "d", "c", "$_self">]> { + let summary = "dot"; + + let description = [{ + $d = matrix_multiply($a, $b) + $c + }]; + + let arguments = (ins TT_FpIntTensor:$a, TT_FpIntTensor:$b, TT_FpIntTensor:$c, BoolAttr:$allowTF32); + + let results = (outs TT_FpIntTensor:$d); + + let assemblyFormat = "$a`,` $b`,` $c attr-dict `:` type($a) `*` type($b) `->` type($d)"; +} + +// +// Reduce Op +// +def TT_ReduceOp : TT_Op<"reduce", [NoSideEffect, + DeclareOpInterfaceMethods]> { + let summary = "reduce"; + + let arguments = (ins TT_RedOpAttr:$redOp, TT_Tensor:$operand, I32Attr:$axis); + + let results = (outs TT_Type:$result); + + let builders = [ + OpBuilder<(ins "triton::RedOp":$redOp, "Value":$operand, "int":$axis)>, + ]; + + let assemblyFormat = "$operand attr-dict `:` type($operand) `->` type($result)"; + + let extraClassDeclaration = [{ + // This member function is marked static because we need to call it before the ReduceOp + // is constructed, see the implementation of create_reduce in triton.cc. + static bool withIndex(mlir::triton::RedOp redOp); + }]; +} + +// +// External elementwise op +// +def TT_ExtElemwiseOp : TT_Op<"ext_elemwise", [NoSideEffect, Elementwise, SameOperandsAndResultShape, + SameOperandsAndResultEncoding, + SameVariadicOperandSize]> { + let summary = "ext_elemwise"; + + let description = [{ + call an external function $symbol implemented in $libpath/$libname with $args + + return $libpath/$libname:$symbol($args...) + }]; + + let arguments = (ins Variadic:$args, StrAttr:$libname, StrAttr:$libpath, StrAttr:$symbol); + + let results = (outs TT_Type:$result); + + let assemblyFormat = "operands attr-dict `:` type(operands) `->` type($result)"; +} + +// +// Make Range Op +// +// TODO: should have ConstantLike as Trait +def TT_MakeRangeOp : TT_Op<"make_range", [NoSideEffect]> { + let summary = "make range"; + + let description = [{ + Returns an 1D int32 tensor. 
+ + Values span from $start to $end (exclusive), with step = 1 + }]; + + let arguments = (ins I32Attr:$start, I32Attr:$end); + + let results = (outs TT_IntTensor:$result); + + let assemblyFormat = "attr-dict `:` type($result)"; +} + +// +// Make PrintfOp +// +def TT_PrintfOp : TT_Op<"printf", [MemoryEffects<[MemWrite]>]>, + Arguments<(ins StrAttr:$prefix, + Variadic>:$args)> { + let summary = "Device-side printf, as in CUDA for debugging"; + let description = [{ + `tt.printf` takes a literal string prefix and an arbitrary number of scalar or tensor arguments that should be printed. + format are generated automatically from the arguments. + }]; + let assemblyFormat = [{ + $prefix attr-dict ($args^ `:` type($args))? + }]; +} + +#endif // Triton_OPS diff --git a/include/triton/Dialect/Triton/IR/TritonTypes.td b/include/triton/Dialect/Triton/IR/TritonTypes.td new file mode 100644 index 000000000000..66d2a7b9a9b3 --- /dev/null +++ b/include/triton/Dialect/Triton/IR/TritonTypes.td @@ -0,0 +1,71 @@ +#ifndef TRITON_TYPES +#define TRITON_TYPES + +include "triton/Dialect/Triton/IR/TritonDialect.td" + +// +// Types +// +class TritonTypeDef + : TypeDef { + // Used by printer/parser + let mnemonic = _mnemonic; +} + +// Floating-point Type +def F8 : TritonTypeDef<"Float8", "f8">; + +def TT_Float : AnyTypeOf<[F8, F16, BF16, F32, F64], "floating-point">; +def TT_FloatTensor : TensorOf<[TT_Float]>; +def TT_FloatLike : AnyTypeOf<[TT_Float, TT_FloatTensor]>; + +// Boolean Type +// TT_Bool -> I1 +def TT_BoolTensor : TensorOf<[I1]>; +def TT_BoolLike : AnyTypeOf<[I1, TT_BoolTensor]>; + +// Integer Type +def TT_Int : AnyTypeOf<[I1, I8, I16, I32, I64], "integer">; +def TT_IntTensor : TensorOf<[TT_Int]>; +def TT_IntLike : AnyTypeOf<[TT_Int, TT_IntTensor]>; + +// I32 Type +// TT_I32 -> I32 +// TT_I32Tensor -> I32Tensor +def TT_I32Like: AnyTypeOf<[I32, I32Tensor]>; + +// I64 Type +// TT_I64 -> I64 +// TT_I64Tensor -> I64Tensor +def TT_I64Like: AnyTypeOf<[I64, I64Tensor]>; + +// Pointer Type +def TT_Ptr : TritonTypeDef<"Pointer", "ptr"> { + let summary = "pointer type"; + + let description = [{ + Triton PointerType + }]; + + let parameters = (ins "Type":$pointeeType, "int":$addressSpace); + + let builders = [ + TypeBuilderWithInferredContext<(ins + "Type":$pointeeType, + "int":$addressSpace + ), [{ + return $_get(pointeeType.getContext(), pointeeType, addressSpace); + }]> + ]; + + let skipDefaultBuilders = 1; +} +def TT_PtrTensor : TensorOf<[TT_Ptr]>; +def TT_PtrLike : AnyTypeOf<[TT_Ptr, TT_PtrTensor]>; + +def TT_FpIntTensor : AnyTypeOf<[TT_FloatTensor, TT_IntTensor]>; +def TT_Tensor : AnyTypeOf<[TT_FpIntTensor, TT_PtrTensor]>; + +def TT_Type : AnyTypeOf<[TT_FloatLike, TT_IntLike, TT_PtrLike]>; + +#endif diff --git a/include/triton/Dialect/Triton/IR/Types.h b/include/triton/Dialect/Triton/IR/Types.h new file mode 100644 index 000000000000..5ffd7db3588c --- /dev/null +++ b/include/triton/Dialect/Triton/IR/Types.h @@ -0,0 +1,10 @@ +#ifndef TRITON_IR_TYPES_H_ +#define TRITON_IR_TYPES_H_ + +#include "mlir/IR/TypeSupport.h" +#include "mlir/IR/Types.h" + +#define GET_TYPEDEF_CLASSES +#include "triton/Dialect/Triton/IR/Types.h.inc" + +#endif // TRITON_IR_TYPES_H_ diff --git a/include/triton/Dialect/Triton/Transforms/CMakeLists.txt b/include/triton/Dialect/Triton/Transforms/CMakeLists.txt new file mode 100644 index 000000000000..372a9ec11ede --- /dev/null +++ b/include/triton/Dialect/Triton/Transforms/CMakeLists.txt @@ -0,0 +1,3 @@ +set(LLVM_TARGET_DEFINITIONS Passes.td) +mlir_tablegen(Passes.h.inc -gen-pass-decls -name 
Triton) +add_public_tablegen_target(TritonTransformsIncGen) diff --git a/include/triton/Dialect/Triton/Transforms/Passes.h b/include/triton/Dialect/Triton/Transforms/Passes.h new file mode 100644 index 000000000000..c4176d3a6724 --- /dev/null +++ b/include/triton/Dialect/Triton/Transforms/Passes.h @@ -0,0 +1,18 @@ +#ifndef TRITON_DIALECT_TRITON_TRANSFORMS_PASSES_H_ +#define TRITON_DIALECT_TRITON_TRANSFORMS_PASSES_H_ + +#include "mlir/Pass/Pass.h" + +namespace mlir { +namespace triton { + +std::unique_ptr createCombineOpsPass(); + +} // namespace triton + +#define GEN_PASS_REGISTRATION +#include "triton/Dialect/Triton/Transforms/Passes.h.inc" + +} // namespace mlir + +#endif diff --git a/include/triton/Dialect/Triton/Transforms/Passes.td b/include/triton/Dialect/Triton/Transforms/Passes.td new file mode 100644 index 000000000000..8f77aed774aa --- /dev/null +++ b/include/triton/Dialect/Triton/Transforms/Passes.td @@ -0,0 +1,23 @@ +#ifndef TRITON_PASSES +#define TRITON_PASSES + +include "mlir/Pass/PassBase.td" + +def TritonCombineOps : Pass { + let summary = "combine ops"; + let description = [{ + dot(a, b, 0) + c => dot(a, b, c) + + addptr(addptr(ptr, idx0), idx1) => addptr(ptr, AddI(idx0, idx1)) + + select(cond, load(ptrs, broadcast(cond), ???), other) => + load(ptrs, broadcast(cond), other) + }]; + + let constructor = "mlir::triton::createCombineOpsPass()"; + + let dependentDialects = ["mlir::arith::ArithmeticDialect", + /*SelectOp*/"mlir::StandardOpsDialect"]; +} + +#endif diff --git a/include/triton/Dialect/TritonGPU/CMakeLists.txt b/include/triton/Dialect/TritonGPU/CMakeLists.txt new file mode 100644 index 000000000000..9f57627c321f --- /dev/null +++ b/include/triton/Dialect/TritonGPU/CMakeLists.txt @@ -0,0 +1,2 @@ +add_subdirectory(IR) +add_subdirectory(Transforms) diff --git a/include/triton/Dialect/TritonGPU/IR/CMakeLists.txt b/include/triton/Dialect/TritonGPU/IR/CMakeLists.txt new file mode 100644 index 000000000000..c44acaa3d472 --- /dev/null +++ b/include/triton/Dialect/TritonGPU/IR/CMakeLists.txt @@ -0,0 +1,12 @@ +set(LLVM_TARGET_DEFINITIONS TritonGPUOps.td) +mlir_tablegen(Dialect.h.inc -gen-dialect-decls -dialect=triton_gpu) +mlir_tablegen(Dialect.cpp.inc -gen-dialect-defs -dialect=triton_gpu) +mlir_tablegen(Ops.h.inc -gen-op-decls) +mlir_tablegen(Ops.cpp.inc -gen-op-defs) +add_public_tablegen_target(TritonGPUTableGen) + +set(LLVM_TARGET_DEFINITIONS TritonGPUAttrDefs.td) +mlir_tablegen(TritonGPUAttrDefs.h.inc -gen-attrdef-decls) +mlir_tablegen(TritonGPUAttrDefs.cpp.inc -gen-attrdef-defs) +add_public_tablegen_target(TritonGPUAttrDefsIncGen) + diff --git a/include/triton/Dialect/TritonGPU/IR/Dialect.h b/include/triton/Dialect/TritonGPU/IR/Dialect.h new file mode 100644 index 000000000000..8c24a5777f63 --- /dev/null +++ b/include/triton/Dialect/TritonGPU/IR/Dialect.h @@ -0,0 +1,46 @@ +#ifndef TRITON_DIALECT_TRITONGPU_IR_DIALECT_H_ +#define TRITON_DIALECT_TRITONGPU_IR_DIALECT_H_ + +#include "mlir/Dialect/GPU/GPUDialect.h" +#include "mlir/Dialect/Tensor/IR/Tensor.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/Dialect.h" + +// TritonGPU depends on Triton +#include "triton/Dialect/Triton/IR/Dialect.h" + +#include "triton/Dialect/TritonGPU/IR/Dialect.h.inc" +#include "triton/Dialect/TritonGPU/IR/Traits.h" + +#define GET_ATTRDEF_CLASSES +#include "triton/Dialect/Triton/IR/AttrInterfaces.h.inc" +#include "triton/Dialect/TritonGPU/IR/TritonGPUAttrDefs.h.inc" + +#define GET_OP_CLASSES +#include "triton/Dialect/TritonGPU/IR/Ops.h.inc" + +namespace mlir { +namespace triton { 
+namespace gpu { + +unsigned getElemsPerThread(Type type); + +SmallVector getThreadsPerWarp(const Attribute &layout); + +SmallVector getWarpsPerCTA(const Attribute &layout); + +SmallVector getSizePerThread(const Attribute &layout); + +SmallVector getContigPerThread(Attribute layout); + +SmallVector getThreadsPerCTA(const Attribute &layout); + +SmallVector getShapePerCTA(const Attribute &layout); + +SmallVector getOrder(const Attribute &layout); + +} // namespace gpu +} // namespace triton +} // namespace mlir + +#endif // TRITON_DIALECT_TRITONGPU_IR_DIALECT_H_ diff --git a/include/triton/Dialect/TritonGPU/IR/Traits.h b/include/triton/Dialect/TritonGPU/IR/Traits.h new file mode 100644 index 000000000000..44def95804da --- /dev/null +++ b/include/triton/Dialect/TritonGPU/IR/Traits.h @@ -0,0 +1,31 @@ +#ifndef TRITON_GPU_IR_TRAITS_H_ +#define TRITON_GPU_IR_TRAITS_H_ + +#include "mlir/IR/OpDefinition.h" + +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/Support/LogicalResult.h" + +namespace mlir { +namespace OpTrait { + +// These functions are out-of-line implementations of the methods in the +// corresponding trait classes. This avoids them being template +// instantiated/duplicated. +namespace impl { +LogicalResult verifyResultsAreSharedEncoding(Operation *op); +} // namespace impl + +template +class ResultsAreSharedEncoding + : public TraitBase { +public: + static LogicalResult verifyTrait(Operation *op) { + return impl::verifyResultsAreSharedEncoding(op); + } +}; + +} // namespace OpTrait +} // namespace mlir + +#endif diff --git a/include/triton/Dialect/TritonGPU/IR/TritonGPUAttrDefs.td b/include/triton/Dialect/TritonGPU/IR/TritonGPUAttrDefs.td new file mode 100644 index 000000000000..dbf8a10f5eba --- /dev/null +++ b/include/triton/Dialect/TritonGPU/IR/TritonGPUAttrDefs.td @@ -0,0 +1,481 @@ +#ifndef TRITONGPU_ATTRDEFS +#define TRITONGPU_ATTRDEFS + +include "triton/Dialect/TritonGPU/IR/TritonGPUDialect.td" +include "triton/Dialect/Triton/IR/TritonInterfaces.td" + +//===----------------------------------------------------------------------===// +// TritonGPU Attribute Definitions +//===----------------------------------------------------------------------===// + +class TritonGPU_Attr traits = [], + string baseCppClass = "::mlir::Attribute"> + : AttrDef { + + let description = [{ +TritonGPU Tensors differ from usual tensors in that they contain a _layout_ attribute which determines +how the data should be partitioned across CUDA threads. Formally speaking, we define a layout as a function +\mathcal{L} that maps a multi-dimensional tensor index $i \in \mathbb{Z}^d$ to a set of integers T corresponding +to the indices of the CUDA threads allowed to access some data at index $i$. + +For example, let us consider the layout function: +\mathcal{L}(0, 0) = {0, 4} +\mathcal{L}(0, 1) = {1, 5} +\mathcal{L}(1, 0) = {2, 6} +\mathcal{L}(1, 1) = {3, 7} + +Then, attaching $\mathcal{L} to a tensor $T$ would mean that: +- T[0,0] is owned by both cuda thread 0 and 4 +- T[0,1] is owned by both cuda thread 1 and 5 +- T[1,0] is owned by both cuda thread 2 and 6 +- T[1,1] is owned by both cuda thread 3 and 7 + +Right now, Triton implements two classes of layouts: shared, and distributed. 
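As an editorial aside (this paragraph and the snippet below are not part of the patch), the example layout above can be enumerated with a few lines of standalone C++, which makes the "set of owning threads" reading of \mathcal{L} concrete:

```
// Enumerate the example layout L(i, j) = { i*2 + j, i*2 + j + 4 } for a
// 2x2 tensor: each index is owned by two CUDA threads.
#include <cstdio>

int main() {
  for (int i = 0; i < 2; ++i)
    for (int j = 0; j < 2; ++j) {
      int t0 = i * 2 + j; // first owning thread
      int t1 = t0 + 4;    // second owning thread (a broadcast copy)
      std::printf("T[%d,%d] -> threads {%d, %d}\n", i, j, t0, t1);
    }
  return 0;
}
```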
+ }]; + + code extraBaseClassDeclaration = [{ + unsigned getElemsPerThread(ArrayRef shape) const; + ::mlir::LogicalResult verifyLayoutForArg(::mlir::Operation* op, unsigned argNo) const; + }]; +} + +//===----------------------------------------------------------------------===// +// Shared Layout Encoding +//===----------------------------------------------------------------------===// + +def SharedEncodingAttr : TritonGPU_Attr<"SharedEncoding"> { + let mnemonic = "shared"; + + let description = [{ +An encoding for tensors whose elements may be simultaneously accessed by +different cuda threads in the programs, via shared memory. In other words, +for all indices i \in R^d, \mathcal{L}(i) = {0, 1, ..., 32*num_warps - 1}. + +In order to avoid shared memory bank conflicts, elements may be swizzled +in memory. For example, a swizzled row-major layout could store its data +as follows: + +A_{0, 0} A_{0, 1} A_{0, 2} A_{0, 3} ... [phase 0] \ per_phase = 2 +A_{1, 0} A_{1, 1} A_{1, 2} A_{1, 3} ... [phase 0] / +groups of vec=2 elements +are stored contiguously +_ _ _ _ /\_ _ _ _ +A_{2, 2} A_{2, 3} A_{2, 0} A_{2, 1} ... [phase 1] \ per phase = 2 +A_{3, 2} A_{3, 3} A_{3, 0} A_{3, 1} ... [phase 1] / + }]; + + let parameters = ( + ins + // swizzle info + "unsigned":$vec, "unsigned":$perPhase, "unsigned":$maxPhase, + ArrayRefParameter<"unsigned", "order of axes by the rate of changing">:$order + ); + + let builders = [ + AttrBuilder<(ins "DotOperandEncodingAttr":$dotOpEnc, + "ArrayRef":$shape, + "ArrayRef":$order, + "Type":$eltTy), [{ + auto mmaEnc = dotOpEnc.getParent().dyn_cast(); + + if(!mmaEnc) + return $_get(context, 1, 1, 1, order); + + int opIdx = dotOpEnc.getOpIdx(); + + // number of rows per phase + int perPhase = 128 / (shape[order[0]] * (eltTy.getIntOrFloatBitWidth() / 8)); + perPhase = std::max(perPhase, 1); + + // index of the inner dimension in `order` + unsigned inner = (opIdx == 0) ? 0 : 1; + + // ---- begin Volta ---- + if (mmaEnc.isVolta()) { + bool is_row = order[0] != 0; + bool is_vec4 = opIdx == 0 ? !is_row && (shape[order[0]] <= 16) : + is_row && (shape[order[0]] <= 16); + // TODO[Superjomn]: Support the case when is_vec4=false later + // Currently, we only support ld.v2, for the mma layout varies with different ld vector width. + is_vec4 = true; + int pack_size = opIdx == 0 ? ((is_row || is_vec4) ? 1 : 2) : + ((is_row && !is_vec4) ? 2 : 1); + int rep = 2 * pack_size; + int maxPhase = (order[inner] == 1 ? 8 : 4) / perPhase; + int vec = 2 * rep; + return $_get(context, vec, perPhase, maxPhase, order); + } + + // ---- begin Ampere ---- + if (mmaEnc.isAmpere()) { + std::vector matShape = {8, 8, + 2 * 64 / eltTy.getIntOrFloatBitWidth()}; + // for now, disable swizzle when using transposed int8 tensor cores + if (eltTy.isInteger(8) && order[0] == inner) + return $_get(context, 1, 1, 1, order); + + // --- handle A operand --- + if (opIdx == 0) { // compute swizzling for A operand + int vec = (order[0] == 1) ? matShape[2] : matShape[0]; // k : m + int mmaStride = (order[0] == 1) ? matShape[0] : matShape[2]; + int maxPhase = mmaStride / perPhase; + return $_get(context, vec, perPhase, maxPhase, order); + } + + // --- handle B operand --- + if (opIdx == 1) { + int vec = (order[0] == 1) ? matShape[1] : matShape[2]; // n : k + int mmaStride = (order[0] == 1) ? 
matShape[2] : matShape[1]; + int maxPhase = mmaStride / perPhase; + return $_get(context, vec, perPhase, maxPhase, order); + } + + llvm_unreachable("invalid operand index"); + } + + // ---- not implemented ---- + llvm_unreachable("unsupported swizzling for provided MMA version"); + + + }]> + ]; + + let extraClassDeclaration = extraBaseClassDeclaration; +} + +//===----------------------------------------------------------------------===// +// Distributed Layout Encoding +//===----------------------------------------------------------------------===// + +class DistributedEncoding : TritonGPU_Attr { + let description = [{ +Distributed encodings have a layout function that is entirely characterized +by a d-dimensional tensor L. Note that L doesn't need to have the same shape +(or even the same rank) as the tensor it is encoding. + +The layout function \mathcal{L} of this layout is then defined, for an +index `i` \in R^D, as follows: + +\mathcal{L}(A)[i_d] = L[(i_d + k_d*A.shape[d]) % L.shape[d]] \forall k_d such as i_d + k_d*A.shape[d] < L.shape[d] + +For example, for a tensor/layout pair +A = [x x x x x x x x] + [x x x x x x x x] +L = [0 1 2 3 ] + [4 5 6 7 ] + [8 9 10 11] + [12 13 14 15] + +Then the data of A would be distributed as follow between the 16 CUDA threads: +L(A) = [ {0,8} , {1,9} , {2,10}, {3,11}, {0,8} , {1, 9} , {2, 10}, {3, 11}, + {4,12}, {5,13}, {6,14}, {7,15}, {4,12}, {5, 13}, {6, 14}, {7, 15} ] + }]; + + let extraClassDeclaration = extraBaseClassDeclaration; +} + +//===----------------------------------------------------------------------===// +// Blocked Layout Encoding +//===----------------------------------------------------------------------===// + +def BlockedEncodingAttr : DistributedEncoding<"BlockedEncoding"> { + let mnemonic = "blocked"; + + let description = [{ +An encoding where each warp owns a contiguous portion of the target tensor. This is typically the kind of data layout +used to promote memory coalescing in LoadInst and StoreInst. +It is characterized by three tuples -- thread tile size, warp tile size, and block tile size -- which +specify the amount of elements owned by each CUDA thread, warp and CTA respectively. + +For example, a row-major coalesced layout may partition a 16x16 tensor over 2 warps (i.e. 64 threads) as follows. + +[ 0 0 1 1 2 2 3 3 ; 32 32 33 33 34 34 35 35 ] +[ 0 0 1 1 2 2 3 3 ; 32 32 33 33 34 34 35 35 ] +[ 4 4 5 5 6 6 7 7 ; 36 36 37 37 38 38 39 39 ] +[ 4 4 5 5 6 6 7 7 ; 36 36 37 37 38 38 39 39 ] +... +[ 28 28 29 29 30 30 31 31 ; 60 60 61 61 62 62 63 63 ] +[ 28 28 29 29 30 30 31 31 ; 60 60 61 61 62 62 63 63 ] + +for + +#triton_gpu.blocked_layout<{ + sizePerThread = {2, 2} + threadsPerWarp = {8, 4} + warpsPerCTA = {1, 2} +}> +}]; + + + let builders = [ + // Custom builder initializes sizePerWarp and sizePerCTA automatically + // TODO: compiles on MacOS but not linux? 
+ // AttrBuilder<(ins "ArrayRef":$sizePerThread, + // "ArrayRef":$threadsPerWarp, + // "ArrayRef":$warpsPerCTA, + // "ArrayRef":$order), [{ + // int rank = threadsPerWarp.size(); + // SmallVector sizePerWarp(rank); + // SmallVector sizePerCTA(rank); + // for (unsigned i = 0; i < rank; i++) { + // sizePerWarp.push_back(sizePerThread[i] * threadsPerWarp[i]); + // sizePerCTA.push_back(sizePerWarp[i] * warpsPerCTA[i]); + // } + // return $_get(context, sizePerThread, threadsPerWarp, warpsPerCTA, order, sizePerWarp, sizePerCTA); + // }]>, + // Custom builder initializes sizePerWarp and sizePerCTA automatically + // Default builder takes sizePerThread, order and numWarps, and tries to + // pack numWarps*32 threads in the provided order for use in a type + // of the given shape. + AttrBuilder<(ins "ArrayRef":$shape, + "ArrayRef":$sizePerThread, + "ArrayRef":$order, + "unsigned":$numWarps), [{ + int rank = sizePerThread.size(); + unsigned remainingLanes = 32; + unsigned remainingThreads = numWarps*32; + unsigned remainingWarps = numWarps; + unsigned prevLanes = 1; + unsigned prevWarps = 1; + SmallVector threadsPerWarp(rank); + SmallVector warpsPerCTA(rank); + for (int _dim = 0; _dim < rank - 1; ++_dim) { + int i = order[_dim]; + unsigned threadsPerCTA = std::clamp(remainingThreads, 1, shape[i] / sizePerThread[i]); + threadsPerWarp[i] = std::clamp(threadsPerCTA, 1, remainingLanes); + warpsPerCTA[i] = std::clamp(threadsPerCTA / threadsPerWarp[i], 1, remainingWarps); + remainingWarps /= warpsPerCTA[i]; + remainingLanes /= threadsPerWarp[i]; + remainingThreads /= threadsPerCTA; + prevLanes *= threadsPerWarp[i]; + prevWarps *= warpsPerCTA[i]; + } + // Expand the last dimension to fill the remaining lanes and warps + threadsPerWarp[order[rank-1]] = 32 / prevLanes; + warpsPerCTA[order[rank-1]] = numWarps / prevWarps; + + return $_get(context, sizePerThread, threadsPerWarp, warpsPerCTA, order); + + }]> + ]; + + let extraClassDeclaration = extraBaseClassDeclaration # [{ + SliceEncodingAttr squeeze(int axis); + }]; + + let parameters = ( + ins + ArrayRefParameter<"unsigned">:$sizePerThread, + ArrayRefParameter<"unsigned">:$threadsPerWarp, + ArrayRefParameter<"unsigned">:$warpsPerCTA, + // fastest-changing axis first + ArrayRefParameter< + "unsigned", + "order of axes by the rate of changing" + >:$order + // These attributes can be inferred from the rest + // ArrayRefParameter<"unsigned">:$sizePerWarp, + // ArrayRefParameter<"unsigned">:$sizePerCTA + ); + +} + +//===----------------------------------------------------------------------===// +// MMA Layout Encoding +//===----------------------------------------------------------------------===// +// TODO: MMAv1 and MMAv2 should be two instances of the same class + +def MmaEncodingAttr : DistributedEncoding<"MmaEncoding"> { + let mnemonic = "mma"; + + let description = [{ +An encoding for tensors that have been produced by tensor cores. +It is characterized by two parameters: +- A 'versionMajor' which specifies the generation the tensor cores +whose output is being partitioned: 1 for first-gen tensor cores (Volta), +and 2 for second-gen tensor cores (Turing/Ampere). +- A 'versionMinor' which indicates the specific layout of a tensor core +generation, e.g. for Volta, there might be multiple kinds of layouts annotated +by 0,1,2 and so on. +- A `blockTileSize` to indicate how data should be +partitioned between warps. 
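As an editorial aside (not part of the patch), the role of `versionMinor` for MMAv1 can be made concrete with a standalone sketch that mirrors the bit packing used by the MMAv1 builder and by `decodeVoltaLayoutStates()` declared further down; the helper name below is made up for illustration:

```
// Decode the four booleans packed into the low bits of versionMinor:
// bit 0 = isARow, bit 1 = isBRow, bit 2 = isAVec4, bit 3 = isBVec4.
#include <cstdio>
#include <tuple>

static std::tuple<bool, bool, bool, bool> decodeVoltaLayout(unsigned versionMinor) {
  return {static_cast<bool>(versionMinor & (1u << 0)),   // isARow
          static_cast<bool>(versionMinor & (1u << 1)),   // isBRow
          static_cast<bool>(versionMinor & (1u << 2)),   // isAVec4
          static_cast<bool>(versionMinor & (1u << 3))};  // isBVec4
}

int main() {
  unsigned versionMinor = (1u << 0) | (1u << 3); // isARow and isBVec4 set
  auto [isARow, isBRow, isAVec4, isBVec4] = decodeVoltaLayout(versionMinor);
  std::printf("isARow=%d isBRow=%d isAVec4=%d isBVec4=%d\n",
              (int)isARow, (int)isBRow, (int)isAVec4, (int)isBVec4);
  return 0;
}
```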
+ +// -------------------------------- version = 1 --------------------------- // + +For first-gen tensor cores, the implicit warpTileSize is [16, 16]. +Note: the layout is different from the recommended in PTX ISA +https://docs.nvidia.com/cuda/parallel-thread-execution/index.html +(mma.884 section, FP32 accumulator). + +For example, when versionMinor=1, the matrix L corresponding to +blockTileSize=[32,16] is: + + warp 0 +--------------------------------/\------------------------------- +[ 0 0 2 2 8 8 10 10 0 0 2 2 8 8 10 10 ] +[ 1 1 3 3 9 9 11 11 1 1 3 3 9 9 11 11 ] +[ 0 0 2 2 8 8 10 10 0 0 2 2 8 8 10 10 ] +[ 1 1 3 3 9 9 11 11 1 1 3 3 9 9 11 11 ] +[ 4 4 6 6 12 12 14 14 4 4 6 6 12 12 14 14 ] +[ 5 5 7 7 13 13 15 15 5 5 7 7 13 13 15 15 ] +[ 4 4 6 6 12 12 14 14 4 4 6 6 12 12 14 14 ] +[ 5 5 7 7 13 13 15 15 5 5 7 7 13 13 15 15 ] +[ 16 16 18 18 20 20 22 22 16 16 18 18 20 20 22 22 ] +[ 17 17 19 19 21 21 23 23 17 17 19 19 21 21 23 23 ] +[ 16 16 18 18 20 20 22 22 16 16 18 18 20 20 22 22 ] +[ 17 17 19 19 21 21 23 23 17 17 19 19 21 21 23 23 ] +[ 24 24 26 26 28 28 30 30 24 24 26 26 28 28 30 30 ] +[ 25 25 27 27 29 29 31 31 25 25 27 27 29 29 31 31 ] +[ 24 24 26 26 28 28 30 30 24 24 26 26 28 28 30 30 ] +[ 25 25 27 27 29 29 31 31 25 25 27 27 29 29 31 31 ] + + warp 1 = warp0 + 32 +--------------------------------/\------------------------------- +[ 32 32 34 34 40 40 42 42 32 32 34 34 40 40 42 42 ] +[ 33 33 35 35 41 41 43 43 33 33 35 35 41 41 43 43 ] +[ ............................................................... ] + + +// -------------------------------- version = 2 --------------------------- // + +For second-gen tensor cores, the implicit warpTileSize is [16, 8]. +Information about this layout can be found in the official PTX documentation +https://docs.nvidia.com/cuda/parallel-thread-execution/index.html +(mma.16816 section, FP32 accumulator). + +For example, the matrix L corresponding to blockTileSize=[32,16] is: + warp 0 warp 1 +-----------------/\------------- ----------------/\------------- +[ 0 0 1 1 2 2 3 3 32 32 33 33 34 34 35 35 +[ 4 4 5 5 6 6 7 7 36 36 37 37 38 38 39 39 +[ .............................. .............................. +[ 28 28 29 29 30 30 31 31 60 60 61 61 62 62 63 63 +[ 0 0 1 1 2 2 3 3 32 32 33 33 34 34 35 35 +[ 4 4 5 5 6 6 7 7 36 36 37 37 38 38 39 39 +[ .............................. .............................. +[ 28 28 29 29 30 30 31 31 60 60 61 61 62 62 63 63 + + warp 3 warp 4 +----------------/\------------- ----------------/\------------- +[ 64 64 65 65 66 66 67 67 96 96 97 97 98 98 99 99 +[ 68 68 69 69 70 70 71 71 100 100 101 101 102 102 103 103 +[ .............................. ............................... +[ 92 92 93 93 94 94 95 95 124 124 125 125 126 126 127 127 +[ 64 64 65 65 66 66 67 67 96 96 97 97 98 98 99 99 +[ 68 68 69 69 70 70 71 71 100 100 101 101 102 102 103 103 +[ .............................. ............................... 
+[ 92 92 93 93 94 94 95 95 124 124 125 125 126 126 127 127 + +}]; + + let parameters = ( + ins + "unsigned":$versionMajor, + "unsigned":$versionMinor, + ArrayRefParameter<"unsigned">:$warpsPerCTA + ); + + let builders = [ + // specific for MMAV1(Volta) + AttrBuilder<(ins "int":$versionMajor, + "ArrayRef":$warpsPerCTA, + "ArrayRef":$shapeA, + "ArrayRef":$shapeB, + "bool":$isARow, + "bool":$isBRow), [{ + assert(versionMajor == 1 && "Only MMAv1 has multiple versionMinor."); + bool isAVec4 = !isARow && (shapeA[isARow] <= 16); + bool isBVec4 = isBRow && (shapeB[isBRow] <= 16); + // 4-bits to encode 4 booleans: [isARow, isBRow, isAVec4, isBVec4] + int versionMinor = (isARow * (1<<0)) |\ + (isBRow * (1<<1)) |\ + (isAVec4 * (1<<2)) |\ + (isBVec4 * (1<<3)); + return $_get(context, versionMajor, versionMinor, warpsPerCTA); + }]> + + ]; + + let extraClassDeclaration = extraBaseClassDeclaration # [{ + bool isVolta() const; + bool isAmpere() const; + // Get [isARow, isBRow, isAVec4, isBVec4] from versionMinor + std::tuple decodeVoltaLayoutStates() const; + }]; + +} + +def SliceEncodingAttr : DistributedEncoding<"SliceEncoding"> { + let mnemonic = "slice"; + + let description = [{ + TODO: improve docs + + A = [x x x x x x x x] + + parent = [0 1 2 3 ] + [4 5 6 7 ] + [8 9 10 11] + [12 13 14 15] + dim = 0 + + Then the data of A would be distributed as follow between the 16 CUDA threads: + L(A) = [ {0,4,8,12} , {1,5,9,13} , ... {3,7,11,15}, {0,4,8,12} , ..., {3,7,11,15} ] + + This is useful for constructing the inverse layout of an expand_dims operation during some optimization passes. + + }]; + + let parameters = ( + ins + "unsigned":$dim, + // TODO: constraint here to only take distributed encodings + "Attribute":$parent + ); + + let extraClassDeclaration = extraBaseClassDeclaration # [{ + template + SmallVector paddedShape(ArrayRef shape) const; + }]; +} + +def DotOperandEncodingAttr : DistributedEncoding<"DotOperandEncoding"> { + let mnemonic = "dot_op"; + + let description = [{ +In TritonGPU dialect, considering `d = tt.dot a, b, c` +tt.dot's operands a and b must be of DotOperandEncodingAttr layout. +a's opIdx is 0, b's opIdx is 1. +The parend field in DotOperandEncodingAttr is the layout of d. + +For MMA v1, an additional attribute `isMMAv1Row` determines whether e.g. the a operand is used +in the context of an mma.884.row.col or an mma.884.col.col operation. See the PTX ISA documentation +section 9.7.13.4.1 for more details. + }]; + + let parameters = ( + ins + "unsigned":$opIdx, + "Attribute":$parent, + "Attribute":$isMMAv1Row + ); + + let builders = [ + AttrBuilder<(ins "unsigned":$opIdx, + "Attribute":$parent), [{ + Attribute isMMAv1Row; + if(parent.isa() && + parent.cast().isVolta()){ + isMMAv1Row = BoolAttr::get(context, true); + } + return $_get(context, opIdx, parent, isMMAv1Row); + }]> + + ]; + + let extraClassDeclaration = extraBaseClassDeclaration; +} + + + +#endif diff --git a/include/triton/Dialect/TritonGPU/IR/TritonGPUDialect.td b/include/triton/Dialect/TritonGPU/IR/TritonGPUDialect.td new file mode 100644 index 000000000000..87ec1d36c697 --- /dev/null +++ b/include/triton/Dialect/TritonGPU/IR/TritonGPUDialect.td @@ -0,0 +1,36 @@ +#ifndef TRITONGPU_DIALECT +#define TRITONGPU_DIALECT + +include "mlir/IR/OpBase.td" + +def TritonGPU_Dialect : Dialect { + let name = "triton_gpu"; + + let cppNamespace = "::mlir::triton::gpu"; + + let hasOperationAttrVerify = 1; + + let description = [{ + Triton GPU Dialect. 
+ }]; + + let dependentDialects = [ + "triton::TritonDialect", + "mlir::gpu::GPUDialect", + "tensor::TensorDialect", + ]; + + let extraClassDeclaration = [{ + static std::string getNumWarpsAttrName() { return "triton_gpu.num-warps"; } + static int getNumWarps(ModuleOp mod) { + if(!mod->hasAttr("triton_gpu.num-warps")) + llvm::report_fatal_error( + "TritonGPU module should contain a triton_gpu.num-warps attribute"); + return mod->getAttr("triton_gpu.num-warps").cast().getInt(); + } + }]; + + +} + +#endif diff --git a/include/triton/Dialect/TritonGPU/IR/TritonGPUOps.td b/include/triton/Dialect/TritonGPU/IR/TritonGPUOps.td new file mode 100644 index 000000000000..b7f40d6d54a8 --- /dev/null +++ b/include/triton/Dialect/TritonGPU/IR/TritonGPUOps.td @@ -0,0 +1,198 @@ +#ifndef TRITONGPU_OPS +#define TRITONGPU_OPS + +include "triton/Dialect/TritonGPU/IR/TritonGPUDialect.td" +include "triton/Dialect/TritonGPU/IR/TritonGPUAttrDefs.td" +include "mlir/Dialect/Arithmetic/IR/ArithmeticBase.td" +include "triton/Dialect/Triton/IR/TritonTypes.td" +include "triton/Dialect/Triton/IR/TritonAttrDefs.td" +include "mlir/IR/OpBase.td" +include "mlir/Interfaces/SideEffectInterfaces.td" // NoSideEffect +include "mlir/Interfaces/InferTypeOpInterface.td" // SameOperandsAndResultType + +def ResultsAreSharedEncoding: NativeOpTrait<"ResultsAreSharedEncoding">; + +class TTG_Op traits = []> : + Op; + +def TTG_ConvertLayoutOp : TTG_Op<"convert_layout", + [SameOperandsAndResultShape, NoSideEffect]> { + let summary = "convert layout"; + + let arguments = (ins TT_Tensor:$src); + + let results = (outs TT_Tensor:$result); + + let assemblyFormat = "$src attr-dict `:` functional-type(operands, results)"; +} + +def TTG_AsyncWaitOp : TTG_Op<"async_wait"> { + let summary = "async wait"; + + let arguments = (ins I32Attr:$num); + + let assemblyFormat = "attr-dict"; + + let extraClassDeclaration = [{ + static bool isSupported(int computeCapability) { + return computeCapability >= 80; + } + }]; +} + +// Port Arith_CmpIOp & Arith_CmpFOp & Std_SelectOp to TritonGPU. 
+// This is needed because these ops don't +// handle encodings +// e.g., https://github.com/llvm/llvm-project/blob/main/mlir/include/mlir/Dialect/Arith/IR/ArithOps.td#L111 +def TTG_CmpIOp : TTG_Op<"cmpi", [NoSideEffect, Elementwise, + SameOperandsAndResultShape, + SameOperandsAndResultEncoding]> { + let summary = "integer comparison operation"; + + let description = [{}]; + + let arguments = (ins Arith_CmpIPredicateAttr:$predicate, + TT_IntLike:$lhs, + TT_IntLike:$rhs); + + let results = (outs TT_BoolLike:$result); +} + +def TTG_CmpFOp : TTG_Op<"cmpf", [NoSideEffect, Elementwise, + SameOperandsAndResultShape, + SameOperandsAndResultEncoding]> { + let summary = "floating-point comparison operation"; + + let description = [{}]; + + let arguments = (ins Arith_CmpFPredicateAttr:$predicate, + TT_FloatLike:$lhs, + TT_FloatLike:$rhs); + + let results = (outs TT_BoolLike:$result); +} + +// TODO: migrate to arith::SelectOp on LLVM16 +def TTG_SelectOp : TTG_Op<"select", [NoSideEffect, Elementwise, + SameOperandsAndResultShape, + SameOperandsAndResultEncoding]> { + let summary = "select operation"; + + let description = [{}]; + + let arguments = (ins TT_BoolLike:$condition, + TT_Tensor:$true_value, + TT_Tensor:$false_value); + + let results = (outs TT_Tensor:$result); +} + + +def TTG_InsertSliceAsyncOp : TTG_Op<"insert_slice_async", + [AttrSizedOperandSegments, + ResultsAreSharedEncoding, + MemoryEffects<[MemRead]>, + TypesMatchWith<"infer mask type from src type", + "src", "mask", "getI1SameShape($_self)", + "($_op.getOperands().size() <= 3) || std::equal_to<>()">, + TypesMatchWith<"infer other type from src type", + "src", "other", "getPointeeType($_self)", + "($_op.getOperands().size() <= 4) || std::equal_to<>()">]> { + let summary = "insert slice async"; + + let description = [{ + This operation inserts a tensor `$src` into another tensor `$dst` as specified by the operation’s + `$index` argument and `$axis` attribute. + + It returns a copy of `$dst` with the proper slice updated asynchronously with the value of `$src`. + This operation is non-blocking, and `$results` will have the updated value after the corresponding async_wait. + + When converting from `tt.load` to `triton_gpu.insert_slice_async`, the `$evict`, `$cache`, and `$isVolatile` fields + might be ignored on certain hardware. For example, on NVIDIA GPUs, the cache policy is determined by the backend, + and `$evict` and `$isVolatile` are ignored because they apply to L1 cache only. + + The insert_slice_async operation supports the following arguments: + + * src: the tensor that is inserted. + * dst: the tensor into which the `$src` tensor is inserted. + * index: the index of the `$src` tensor at the given `$axis` from which the `$dst` tensor is inserted into + * mask: optional tensor-rank number of boolean masks which specify which + elements of the `$src` tensor are inserted into the `$dst` tensor. + * other: optional tensor-rank number of other tensors which specify what + values are inserted into the `$dst` tensor if the corresponding + element of the `$mask` tensor is false. 
+
+      In the future, we may decompose this operation into a sequence of:
+
+      * `async` operation to specify a sequence of asynchronous operations
+      * `load` operation to load a tensor from global memory
+      * `insert_slice` operations to insert the `$src` tensor into the `$dst` tensor
+
+      Example:
+
+      ```
+      %1 = triton_gpu.alloc_tensor : tensor<2x32xf32>
+      %2 = triton_gpu.insert_slice_async %0, %1, %index { axis = 0 } : tensor<32x!tt.ptr, #AL> -> tensor<2x32xf32, #A>
+      triton_gpu.async_wait { num = 0 : i32 }
+      ```
+  }];
+
+  let arguments = (ins TT_PtrTensor:$src, TT_Tensor:$dst, I32:$index,
+                       Optional:$mask, Optional:$other,
+                       TT_CacheModifierAttr:$cache, TT_EvictionPolicyAttr:$evict,
+                       BoolAttr:$isVolatile, I32Attr:$axis);
+
+  let builders = [
+    OpBuilder<(ins "Value":$src, "Value":$dst, "Value":$index,
+                   "triton::CacheModifier":$cache,
+                   "triton::EvictionPolicy":$evict, "bool":$isVolatile, "int":$axis)>,
+    OpBuilder<(ins "Value":$src, "Value":$dst, "Value":$index, "Value":$mask,
+                   "triton::CacheModifier":$cache,
+                   "triton::EvictionPolicy":$evict, "bool":$isVolatile, "int":$axis)>,
+    OpBuilder<(ins "Value":$src, "Value":$dst, "Value":$index,
+                   "Value":$mask, "Value":$other,
+                   "triton::CacheModifier":$cache,
+                   "triton::EvictionPolicy":$evict, "bool":$isVolatile, "int":$axis)>,
+  ];
+
+  let results = (outs TT_Tensor:$result);
+
+  //let assemblyFormat = [{
+  //  $src `,` $dst ``
+  //  $index, $mask, $other
+  //  attr-dict `:` type($src) `->` type($dst)
+  //}];
+
+  let extraClassDeclaration = [{
+    static DenseSet getEligibleLoadByteWidth(int computeCapability) {
+      DenseSet validLoadBytes;
+      if (computeCapability >= 80) {
+        validLoadBytes = {4, 8, 16};
+      }
+      return validLoadBytes;
+    }
+  }];
+
+  // The custom parser could be replaced with oilist in LLVM-16
+  let parser = [{ return parseInsertSliceAsyncOp(parser, result); }];
+
+  let printer = [{ return printInsertSliceAsyncOp(p, *this); }];
+}
+
+def TTG_AllocTensorOp : TTG_Op<"alloc_tensor", [MemoryEffects<[MemAlloc]>,  // Allocate shared memory
+                                                ResultsAreSharedEncoding]> {
+  let summary = "allocate tensor";
+
+  let description = [{
+    This operation defines a tensor of a particular shape.
+    The contents of the tensor are supposed to be in shared memory.
+
+    Note: This op can be replaced with a `bufferization.alloc_tensor` in LLVM 16.
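Stepping back to `insert_slice_async`: a hypothetical caller-side check, not part of this patch, that mirrors `getEligibleLoadByteWidth()` above when deciding whether a `tt.load` may be rewritten into the async form. The helper name and the `bytesPerLane` parameter are illustrative assumptions.

```
// Hypothetical helper: gate the tt.load -> insert_slice_async rewrite on the
// byte widths that getEligibleLoadByteWidth() reports for the target GPU.
#include "llvm/ADT/DenseSet.h"

static bool canUseInsertSliceAsync(int bytesPerLane, int computeCapability) {
  llvm::DenseSet<int> eligible;
  if (computeCapability >= 80)   // cp.async copies exist only on sm_80 and newer
    eligible = {4, 8, 16};
  return eligible.count(bytesPerLane) != 0;
}
```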
+ }]; + + let assemblyFormat = [{attr-dict `:` type($result)}]; + + let results = (outs TT_Tensor:$result); +} + +#endif diff --git a/include/triton/Dialect/TritonGPU/Transforms/CMakeLists.txt b/include/triton/Dialect/TritonGPU/Transforms/CMakeLists.txt new file mode 100644 index 000000000000..6be94d1a8a35 --- /dev/null +++ b/include/triton/Dialect/TritonGPU/Transforms/CMakeLists.txt @@ -0,0 +1,3 @@ +set(LLVM_TARGET_DEFINITIONS Passes.td) +mlir_tablegen(Passes.h.inc -gen-pass-decls -name TritonGPU) +add_public_tablegen_target(TritonGPUTransformsIncGen) diff --git a/include/triton/Dialect/TritonGPU/Transforms/Passes.h b/include/triton/Dialect/TritonGPU/Transforms/Passes.h new file mode 100644 index 000000000000..7e02fb2b9109 --- /dev/null +++ b/include/triton/Dialect/TritonGPU/Transforms/Passes.h @@ -0,0 +1,25 @@ +#ifndef TRITON_DIALECT_TRITONGPU_TRANSFORMS_PASSES_H_ +#define TRITON_DIALECT_TRITONGPU_TRANSFORMS_PASSES_H_ + +#include "mlir/Pass/Pass.h" + +namespace mlir { +std::unique_ptr createTritonGPUPipelinePass(int numStages = 2); + +// TODO(Keren): prefetch pass not working yet +std::unique_ptr createTritonGPUPrefetchPass(); + +std::unique_ptr createTritonGPUCanonicalizeLoopsPass(); + +std::unique_ptr createTritonGPUCoalescePass(); + +std::unique_ptr createTritonGPUCombineOpsPass(int computeCapability = 80); + +std::unique_ptr createTritonGPUVerifier(); + +/// Generate the code for registering passes. +#define GEN_PASS_REGISTRATION +#include "triton/Dialect/TritonGPU/Transforms/Passes.h.inc" + +} // namespace mlir +#endif diff --git a/include/triton/Dialect/TritonGPU/Transforms/Passes.td b/include/triton/Dialect/TritonGPU/Transforms/Passes.td new file mode 100644 index 000000000000..f22a76c5597f --- /dev/null +++ b/include/triton/Dialect/TritonGPU/Transforms/Passes.td @@ -0,0 +1,87 @@ +#ifndef TRITONGPU_PASSES +#define TRITONGPU_PASSES + +include "mlir/Pass/PassBase.td" + +def TritonGPUPipeline : Pass<"tritongpu-pipeline", "mlir::ModuleOp"> { + let summary = "pipeline"; + + let description = [{ + Unroll loops to hide global memory -> shared memory latency. + }]; + + let constructor = "mlir::createTritonGPUPipelinePass()"; + + let dependentDialects = ["mlir::triton::gpu::TritonGPUDialect", + "mlir::scf::SCFDialect", + "mlir::arith::ArithmeticDialect"]; + + let options = [ + Option<"numStages", "num-stages", + "int32_t", /*default*/"2", + "number of pipeline stages"> + ]; +} + +def TritonGPUPrefetch : Pass<"tritongpu-prefetch", "mlir::ModuleOp"> { + let summary = "prefetch"; + + let description = [{ + Prefetch operands (a and b) of tt.dot into shared memory to hide shared memory -> register latency. 
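A minimal sketch of how a driver might schedule the pass constructors declared in Passes.h above. The ordering shown is an assumption, not the pipeline this patch installs, and `buildTritonGPUPasses` is a hypothetical name.

```
// Illustrative only: register the TritonGPU transforms on a standard
// mlir::PassManager; the order is an assumption.
#include "mlir/Pass/PassManager.h"
#include "triton/Dialect/TritonGPU/Transforms/Passes.h"

static void buildTritonGPUPasses(mlir::PassManager &pm, int numStages,
                                 int computeCapability) {
  pm.addPass(mlir::createTritonGPUCoalescePass());
  pm.addPass(mlir::createTritonGPUCombineOpsPass(computeCapability));
  pm.addPass(mlir::createTritonGPUPipelinePass(numStages));
  pm.addPass(mlir::createTritonGPUCanonicalizeLoopsPass());
}
```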
+ }]; + + let constructor = "mlir::createTritonGPUPrefetchPass()"; + + let dependentDialects = ["mlir::triton::gpu::TritonGPUDialect", + "mlir::scf::SCFDialect", + "mlir::arith::ArithmeticDialect"]; +} + +def TritonGPUCoalesce: Pass<"tritongpu-coalesce", "mlir::ModuleOp"> { + let summary = "coalesce"; + + let description = [{ + TODO + }]; + + let constructor = "mlir::createTritonGPUCoalescePass()"; + + let dependentDialects = ["mlir::triton::gpu::TritonGPUDialect"]; +} + +def TritonGPUCombineOps : Pass<"tritongpu-combine", "mlir::ModuleOp"> { + let summary = "combine triton gpu ops"; + + let description = [{ + convert_layout(convert_layout(%src, #LAYOUT_0), #LAYOUT_1) => + convert_layout(%src, #LAYOUT_1) + + convert_layout(%src, #LAYOUT) => %src if %src.layout() == #LAYOUT + }]; + + let constructor = "mlir::createTritonGPUCombineOpsPass()"; + + let dependentDialects = ["mlir::triton::gpu::TritonGPUDialect", + "mlir::triton::TritonDialect"]; + + let options = [ + Option<"computeCapability", "compute-capability", + "int32_t", /*default*/"80", + "device compute capability"> + ]; +} + +def TritonGPUCanonicalizeLoops: Pass<"tritongpu-canonicalize-loops", "mlir::ModuleOp"> { + let summary = "canonicalize scf.ForOp ops"; + + let description = [{ + This implements some optimizations that are missing in the standard scf.ForOp + canonicalizer. + }]; + + let constructor = "mlir::createTritonGPUCanonicalizeLoopsPass()"; + + let dependentDialects = ["mlir::triton::gpu::TritonGPUDialect"]; +} + +#endif diff --git a/include/triton/Dialect/TritonGPU/Transforms/TritonGPUConversion.h b/include/triton/Dialect/TritonGPU/Transforms/TritonGPUConversion.h new file mode 100644 index 000000000000..08f419630c53 --- /dev/null +++ b/include/triton/Dialect/TritonGPU/Transforms/TritonGPUConversion.h @@ -0,0 +1,33 @@ +//===----------------------------------------------------------------------===// +// +// Defines utilities to use while converting to the TritonGPU dialect. +// +//===----------------------------------------------------------------------===// + +#ifndef TRITON_DIALECT_TRITONGPU_TRANSFORMS_TRITONGPUCONVERSION_H_ +#define TRITON_DIALECT_TRITONGPU_TRANSFORMS_TRITONGPUCONVERSION_H_ + +#include "mlir/Transforms/DialectConversion.h" + +namespace mlir { + +class TritonGPUTypeConverter : public TypeConverter { +public: + TritonGPUTypeConverter(MLIRContext *context, int numWarps); + int getNumWarps() const { return numWarps; } + +private: + MLIRContext *context; + int numWarps; +}; + +class TritonGPUConversionTarget : public ConversionTarget { + +public: + explicit TritonGPUConversionTarget(MLIRContext &ctx, + TritonGPUTypeConverter &typeConverter); +}; + +} // namespace mlir + +#endif // TRITON_DIALECT_TRITONGPU_TRANSFORMS_TRITONGPUCONVERSION_H_ diff --git a/include/triton/Target/LLVMIR/LLVMIRTranslation.h b/include/triton/Target/LLVMIR/LLVMIRTranslation.h new file mode 100644 index 000000000000..b83ff9f57584 --- /dev/null +++ b/include/triton/Target/LLVMIR/LLVMIRTranslation.h @@ -0,0 +1,39 @@ +#ifndef TRITON_TARGET_LLVMIRTRANSLATION_H +#define TRITON_TARGET_LLVMIRTRANSLATION_H +#include "llvm/ADT/StringRef.h" +#include +#include +#include + +namespace llvm { +class Module; +class LLVMContext; +} // namespace llvm + +namespace mlir { +class ModuleOp; +} // namespace mlir + +namespace mlir { +namespace triton { + +// add external dependent libs +void addExternalLibs(mlir::ModuleOp &module, + const std::vector &names, + const std::vector &paths); + +// Translate TritonGPU dialect to LLVMIR, return null if failed. 
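The translation entry point declared just below, together with `translateLLVMIRToPTX` from PTXTranslation.h further down, is meant to be chained by a caller. A minimal caller-side sketch follows; `compileToPTX` is a hypothetical wrapper and the capability/PTX-version arguments are placeholders.

```
// Caller-side sketch (not in this patch): TritonGPU module -> LLVM IR -> PTX.
#include "triton/Target/LLVMIR/LLVMIRTranslation.h"
#include "triton/Target/PTX/PTXTranslation.h"
#include "mlir/IR/BuiltinOps.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include <string>

static std::string compileToPTX(mlir::ModuleOp mod, int computeCapability,
                                int ptxVersion) {
  llvm::LLVMContext llvmContext;
  // TritonGPU dialect -> LLVM IR; returns null on failure.
  auto llvmModule = mlir::triton::translateTritonGPUToLLVMIR(
      &llvmContext, mod, computeCapability);
  if (!llvmModule)
    return "";
  // LLVM IR -> PTX text for the requested compute capability / PTX ISA version.
  return triton::translateLLVMIRToPTX(*llvmModule, computeCapability, ptxVersion);
}
```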
+std::unique_ptr +translateTritonGPUToLLVMIR(llvm::LLVMContext *llvmContext, + mlir::ModuleOp module, int computeCapability); + +// Translate mlir LLVM dialect to LLVMIR, return null if failed. +std::unique_ptr +translateLLVMToLLVMIR(llvm::LLVMContext *llvmContext, mlir::ModuleOp module); + +bool linkExternLib(llvm::Module &module, llvm::StringRef path); + +} // namespace triton +} // namespace mlir + +#endif // TRITON_TARGET_LLVMIRTRANSLATION_H diff --git a/include/triton/Target/PTX/PTXTranslation.h b/include/triton/Target/PTX/PTXTranslation.h new file mode 100644 index 000000000000..63ea87a5c2e2 --- /dev/null +++ b/include/triton/Target/PTX/PTXTranslation.h @@ -0,0 +1,17 @@ +#ifndef TRITON_TARGET_PTXTRANSLATION_H +#define TRITON_TARGET_PTXTRANSLATION_H + +#include + +namespace llvm { +class Module; +} // namespace llvm + +namespace triton { + +// Translate TritonGPU IR to PTX code. +std::string translateLLVMIRToPTX(llvm::Module &module, int cc, int version); + +} // namespace triton + +#endif diff --git a/include/triton/tools/sys/getenv.hpp b/include/triton/Tools/Sys/GetEnv.hpp old mode 100755 new mode 100644 similarity index 63% rename from include/triton/tools/sys/getenv.hpp rename to include/triton/Tools/Sys/GetEnv.hpp index 755a84a6670f..7dd960070149 --- a/include/triton/tools/sys/getenv.hpp +++ b/include/triton/Tools/Sys/GetEnv.hpp @@ -22,26 +22,32 @@ #ifndef TDL_TOOLS_SYS_GETENV_HPP #define TDL_TOOLS_SYS_GETENV_HPP -#include +#include #include +#include -namespace triton -{ - -namespace tools -{ +namespace triton { - inline std::string getenv(const char * name) - { - const char * cstr = std::getenv(name); - if(!cstr) - return ""; - std::string result(cstr); - return result; - } +namespace tools { +inline std::string getenv(const char *name) { + const char *cstr = std::getenv(name); + if (!cstr) + return ""; + std::string result(cstr); + return result; } +inline bool getBoolEnv(const std::string &env) { + const char *s = std::getenv(env.c_str()); + std::string str(s ? 
s : ""); + std::transform(str.begin(), str.end(), str.begin(), + [](unsigned char c) { return std::tolower(c); }); + return (str == "on" || str == "true" || str == "1"); } +} // namespace tools + +} // namespace triton + #endif diff --git a/include/triton/codegen/analysis/align.h b/include/triton/codegen/analysis/align.h deleted file mode 100644 index 9f4926cfcc59..000000000000 --- a/include/triton/codegen/analysis/align.h +++ /dev/null @@ -1,87 +0,0 @@ -#ifndef TDL_INCLUDE_CODEGEN_ALIGNMENT_INFO_PASS_H -#define TDL_INCLUDE_CODEGEN_ALIGNMENT_INFO_PASS_H - -#include -#include - -namespace triton { - -namespace ir { - class value; - class module; - class phi_node; - class splat_inst; - class cast_inst; - class cmp_inst; - class reshape_inst; - class dequantize_inst; - class broadcast_inst; - class binary_operator; - class getelementptr_inst; -} - -namespace codegen{ -namespace analysis{ - -class align { -private: - struct cst_info { - unsigned num_cst; - unsigned value; - }; - // helpers - std::vector get_shapes(ir::value *v); - // populate is_constant - std::vector populate_is_constant_phi(ir::phi_node* x); - std::vector populate_is_constant_splat(ir::splat_inst* x); - std::vector populate_is_constant_reshape(ir::reshape_inst* x); - std::vector populate_is_constant_dequantize(ir::dequantize_inst* x); - std::vector populate_is_constant_broadcast(ir::broadcast_inst* x); - std::vector populate_is_constant_binop(ir::binary_operator* x); - std::vector populate_is_constant_cmp(ir::cmp_inst* x); - std::vector populate_is_constant_gep(ir::getelementptr_inst* x); - std::vector populate_is_constant_default(ir::value* v); - std::vector populate_is_constant(ir::value *v); - // populate max_contiguous - std::vector populate_max_contiguous_phi(ir::phi_node* x); - std::vector populate_max_contiguous_splat(ir::splat_inst* x); - std::vector populate_max_contiguous_reshape(ir::reshape_inst* x); - std::vector populate_max_contiguous_dequantize(ir::dequantize_inst* x); - std::vector populate_max_contiguous_broadcast(ir::broadcast_inst* x); - std::vector populate_max_contiguous_binop(ir::binary_operator* x); - std::vector populate_max_contiguous_gep(ir::getelementptr_inst* x); - std::vector populate_max_contiguous_cast(ir::cast_inst* x); - std::vector populate_max_contiguous_default(ir::value* v); - std::vector populate_max_contiguous(ir::value *v); - // populate starting_multiple - std::vector populate_starting_multiple_phi(ir::phi_node* x); - std::vector populate_starting_multiple_splat(ir::splat_inst* x); - std::vector populate_starting_multiple_reshape(ir::reshape_inst* x); - std::vector populate_starting_multiple_dequantize(ir::dequantize_inst* x); - std::vector populate_starting_multiple_broadcast(ir::broadcast_inst* x); - std::vector populate_starting_multiple_binop(ir::binary_operator* x); - std::vector populate_starting_multiple_gep(ir::getelementptr_inst* x); - std::vector populate_starting_multiple_cast(ir::cast_inst* x); - std::vector populate_starting_multiple_default(ir::value* v); - std::vector populate_starting_multiple(ir::value *v); - // populate all maps - void populate(ir::value *v); - -public: - void run(ir::module &mod); - unsigned get(ir::value* v, unsigned ax) const; - std::vector contiguous(ir::value* v) const; - std::vector get_cst_info(ir::value* v) const; - -private: - std::map> is_constant_; - std::map> max_contiguous_; - std::map> starting_multiple_; -}; - - -} -} -} - -#endif diff --git a/include/triton/codegen/analysis/allocation.h b/include/triton/codegen/analysis/allocation.h 
deleted file mode 100644 index e49f5c591026..000000000000 --- a/include/triton/codegen/analysis/allocation.h +++ /dev/null @@ -1,47 +0,0 @@ -#ifndef TDL_INCLUDE_IR_CODEGEN_STORAGE_ALLOC_H -#define TDL_INCLUDE_IR_CODEGEN_STORAGE_ALLOC_H - -#include -#include -#include -#include "triton/codegen/analysis/liveness.h" - -namespace triton{ - -namespace ir{ - class value; - class function; - class module; -} - -namespace codegen{ -namespace analysis{ - -class tiles; - -class liveness; -class cts; - -class allocation { -public: - allocation(liveness *live) - : liveness_(live) { } - // accessors - bool has_offset(const data_layout *x) const { return offsets_.find(x) != offsets_.end(); } - unsigned offset(const data_layout *x) const { return offsets_.at(x); } - unsigned allocated_size() const { return allocated_size_; } - // run - void run(ir::module& mod); - -private: - std::map offsets_; - size_t allocated_size_; - // dependences - liveness *liveness_; -}; - -} -} -} - -#endif diff --git a/include/triton/codegen/analysis/axes.h b/include/triton/codegen/analysis/axes.h deleted file mode 100644 index 9e8570b5c1c6..000000000000 --- a/include/triton/codegen/analysis/axes.h +++ /dev/null @@ -1,53 +0,0 @@ -#ifndef _TRITON_CODEGEN_ANALYSIS_AXES_H_ -#define _TRITON_CODEGEN_ANALYSIS_AXES_H_ - -#include "triton/tools/graph.h" -#include -#include - -namespace triton{ - -namespace ir{ - class value; - class module; - class instruction; -} - -namespace codegen{ -namespace analysis{ - -class axes { - typedef std::pair node_t; - -private: - // update graph - void update_graph_store(ir::instruction *i); - void update_graph_reduce(ir::instruction *i); - void update_graph_reshape(ir::instruction *i); - void update_graph_trans(ir::instruction *i); - void update_graph_dequantize(ir::instruction *i); - void update_graph_broadcast(ir::instruction *i); - void update_graph_dot(ir::instruction *i); - void update_graph_elementwise(ir::instruction *i, - bool is_masked_load_async=false); - void update_graph_no_edge(ir::instruction *i); - void update_graph(ir::instruction *i); - -public: - axes(); - void run(ir::module &mod); - // accessors - int get(ir::value *value, unsigned dim); - std::vector get(ir::value *value); - -private: - tools::graph graph_; - std::map axes_; -}; - -} -} - -} - -#endif diff --git a/include/triton/codegen/analysis/layout.h b/include/triton/codegen/analysis/layout.h deleted file mode 100644 index 39d40511efe0..000000000000 --- a/include/triton/codegen/analysis/layout.h +++ /dev/null @@ -1,370 +0,0 @@ -#ifndef _TRITON_CODEGEN_ANALYSIS_GRID_H_ -#define _TRITON_CODEGEN_ANALYSIS_GRID_H_ - -#include -#include -#include -#include -#include "triton/tools/graph.h" -#include "triton/codegen/target.h" - -namespace triton{ - -namespace ir{ - class value; - class type; - class module; - class instruction; - class phi_node; -} - -namespace codegen{ -namespace analysis{ - -class axes; -class align; -class layout_visitor; -class data_layout; -class mma_layout; -class scanline_layout; -class shared_layout; - - -class layout_visitor { -public: - virtual void visit_layout(data_layout *); - virtual void visit_layout_mma(mma_layout*) = 0; - virtual void visit_layout_scanline(scanline_layout*) = 0; - virtual void visit_layout_shared(shared_layout*) = 0; -}; - -class data_layout { -protected: - enum id_t { - MMA, - SCANLINE, - SHARED - }; - - typedef std::vector axes_t; - typedef std::vector shape_t; - typedef std::vector order_t; - typedef std::vector values_t; - -private: - template - T* downcast(id_t id) { - if(id_ == 
id) - return static_cast(this); - return nullptr; - } - -public: - data_layout(id_t id, - const std::vector& axes, - const std::vector &shape, - const std::vector &values, - analysis::align* align); - // visitor - virtual void accept(layout_visitor* vst) = 0; - // downcast - mma_layout* to_mma() { return downcast(MMA); } - scanline_layout* to_scanline() { return downcast(SCANLINE); } - shared_layout* to_shared() { return downcast(SHARED); } - // accessors - size_t get_rank() { return shape_.size(); } - const shape_t& get_shape() const { return shape_; } - const order_t& get_order() const { return order_; } - const values_t& get_values() const { return values_;} - int get_axis(size_t k) const { return axes_.at(k); } - std::vector get_axes() const { return axes_; } - const int get_order(size_t k) const { return order_.at(k); } - // find the position of given axis - int find_axis(int to_find) const; - - -private: - id_t id_; - axes_t axes_; - values_t values_; - -protected: - order_t order_; - shape_t shape_; -}; - -class distributed_layout: public data_layout{ -public: - distributed_layout(id_t id, - const std::vector& axes, - const std::vector& shape, - const std::vector& values, - analysis::align* align); - - int shape_per_cta(size_t k) { return shape_per_cta_.at(k); } - int rep_per_cta(size_t k) { return shape_[k] / shape_per_cta_[k]; } - virtual int contig_per_thread(size_t k) = 0; - -protected: - std::vector shape_per_cta_; -}; - -class mma_layout: public distributed_layout { -public: - enum TensorCoreType : uint8_t { - // floating-point tensor core instr - FP32_FP16_FP16_FP32 = 0, // default - FP32_BF16_BF16_FP32, - FP32_TF32_TF32_FP32, - // integer tensor core instr - INT32_INT1_INT1_INT32, // Not implemented - INT32_INT4_INT4_INT32, // Not implemented - INT32_INT8_INT8_INT32, // Not implemented - // - NOT_APPLICABLE, - }; - - // Used on nvidia GPUs with sm >= 80 - inline static const std::map> mma_instr_shape_ = { - {FP32_FP16_FP16_FP32, {16, 8, 16}}, - {FP32_BF16_BF16_FP32, {16, 8, 16}}, - {FP32_TF32_TF32_FP32, {16, 8, 8}}, - - {INT32_INT1_INT1_INT32, {16, 8, 256}}, - {INT32_INT4_INT4_INT32, {16, 8, 64}}, - {INT32_INT8_INT8_INT32, {16, 8, 32}}, - }; - - // shape of matrices loaded by ldmatrix (m-n-k, for mxk & kxn matrices) - inline static const std::map> mma_mat_shape_ = { - {FP32_FP16_FP16_FP32, {8, 8, 8}}, - {FP32_BF16_BF16_FP32, {8, 8, 8}}, - {FP32_TF32_TF32_FP32, {8, 8, 4}}, - - {INT32_INT1_INT1_INT32, {8, 8, 64}}, - {INT32_INT4_INT4_INT32, {8, 8, 32}}, - {INT32_INT8_INT8_INT32, {8, 8, 16}}, - }; - - inline static const std::map mma_instr_ptx_ = { - {FP32_FP16_FP16_FP32, "mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32"}, - {FP32_BF16_BF16_FP32, "mma.sync.aligned.m16n8k16.row.col.f32.bf16.bf16.f32"}, - {FP32_TF32_TF32_FP32, "mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32"}, - - {INT32_INT1_INT1_INT32, "mma.sync.aligned.m16n8k256.row.col.s32.b1.b1.s32.xor.popc"}, - {INT32_INT4_INT4_INT32, "mma.sync.aligned.m16n8k64.row.col.satfinite.s32.s4.s4.s32"}, - {INT32_INT8_INT8_INT32, "mma.sync.aligned.m16n8k32.row.col.satfinite.s32.s8.s8.s32"}, - }; - - // vector length per ldmatrix (16*8/elelment_size_in_bits) - inline static const std::map mma_instr_vec_ = { - {FP32_FP16_FP16_FP32, 8}, - {FP32_BF16_BF16_FP32, 8}, - {FP32_TF32_TF32_FP32, 4}, - - {INT32_INT1_INT1_INT32, 128}, - {INT32_INT4_INT4_INT32, 32}, - {INT32_INT8_INT8_INT32, 16}, - }; - -public: - mma_layout(size_t num_warps, - const std::vector& axes, - const std::vector& shapes, - const std::vector &values, - 
analysis::align* align, target *tgt, - shared_layout* layout_a, - shared_layout* layout_b, - ir::value *dot); - void accept(layout_visitor* vst) { vst->visit_layout_mma(this); } - // accessor - int fpw(size_t k) { return fpw_.at(k); } - int wpt(size_t k) { return wpt_.at(k); } - int spw(size_t k) { return spw_.at(k); } - int rep(size_t k) { return rep_.at(k); } - int contig_per_thread(size_t k) { return contig_per_thread_.at(k); } - - // helpers for generator.cc - std::string get_ptx_instr() const { return mma_instr_ptx_.at(tensor_core_type_); } - std::vector get_mma_instr_shape() const { return mma_instr_shape_.at(tensor_core_type_); } - std::vector get_mma_mat_shape() const { return mma_mat_shape_.at(tensor_core_type_); } - int get_vec_a() const { return mma_instr_vec_.at(tensor_core_type_); } - int get_vec_b() const { return mma_instr_vec_.at(tensor_core_type_); } - - // setter - void set_tensor_core_type(TensorCoreType type) { tensor_core_type_ = type; } - -private: - // fragment per warp - std::vector fpw_; - // shape per warp - std::vector spw_; - // warp per tile - std::vector wpt_; - // shape per tile - std::vector spt_; - // repetitions - std::vector rep_; - // contiguous per thread - std::vector contig_per_thread_; - - TensorCoreType tensor_core_type_ = FP32_FP16_FP16_FP32; -}; - -class scanline_layout: public distributed_layout { -public: - scanline_layout(size_t num_warps, - const std::vector& axes, - const std::vector& shape, - const std::vector &values, - analysis::align* align, - target* tgt); - void accept(layout_visitor* vst) { vst->visit_layout_scanline(this); } - // accessor - int mts(size_t k) { return mts_.at(k); } - int nts(size_t k) { return nts_.at(k); } - int contig_per_thread(size_t k) { return nts_.at(k); } - - int per_thread(size_t k) { return contig_per_thread(k) * shape_[k] / shape_per_cta(k);} -private: - // micro tile size. The size of a tile held by a thread block. - std::vector mts_; - // nano tile size. The size of a tile held by a thread. 
- std::vector nts_; -}; - -struct double_buffer_info_t { - ir::value* first; - ir::value* latch; - ir::phi_node* phi; -}; - -struct N_buffer_info_t { - std::vector firsts; // not necessarily ordered as input order - ir::value* latch; - ir::phi_node* phi; - std::map firsts_idx; -}; - -// abstract for dot and corresponding smem values -class shared_layout: public data_layout { -private: - static bool is_loop_latch(ir::phi_node *phi, ir::instruction *terminator); - static void extract_double_bufferable(ir::value *v, std::shared_ptr& res); - static void extract_N_bufferable(ir::value *v, std::shared_ptr& res, int &prev_stages); - -public: - shared_layout(data_layout *arg, - const std::vector& axes, - const std::vector& shapes, - const std::vector &values_, - ir::type *ty, - analysis::align* align, target *tgt, - bool is_tmp = false); - void accept(layout_visitor* vst) { vst->visit_layout_shared(this); } - // accessors - size_t get_size() { return size_; } - ir::type* get_type() { return ty_; } - double_buffer_info_t* get_double_buffer() { return double_buffer_.get(); } - N_buffer_info_t* get_N_buffer() { return N_buffer_.get(); } - int get_num_stages() const; - size_t get_per_stage_size() const { return size_ / get_num_stages(); } - size_t get_per_stage_elements() const; - size_t get_num_per_phase() { return num_per_phase_; } - ir::value* hmma_dot_a() { return hmma_dot_a_; } - ir::value* hmma_dot_b() { return hmma_dot_b_; } - void set_mma_vec(int mma_vec) { mma_vec_ = mma_vec; } - int get_mma_vec() { return mma_vec_;} - int get_mma_strided() { return mma_strided_; } - bool allow_swizzle() const { return allow_swizzle_; } - data_layout* get_arg_layout() { return arg_layout_; } - bool is_tmp() const { return is_tmp_; } - -private: - size_t size_; - ir::type *ty_; - std::shared_ptr double_buffer_; - std::shared_ptr N_buffer_; - size_t num_per_phase_; - ir::value* hmma_dot_a_; - ir::value* hmma_dot_b_; - data_layout* arg_layout_; - int mma_vec_; - int mma_strided_; - bool allow_swizzle_ = true; - target *tgt_; - bool is_tmp_; -}; - - - -class layouts { - typedef ir::value* node_t; - typedef std::map > graph_t; - -private: - // graph creation - void connect(ir::value *x, ir::value *y); - void make_graph(ir::instruction *i); - - void init_hmma_tile(data_layout& layouts); - void init_scanline_tile(data_layout &layouts); - - void create(size_t id, const std::vector& values); - - void create_tmp_layout(size_t id, data_layout* arg, - const std::vector& axes, - const std::vector& shape, - ir::instruction* i, - bool is_index = false); - - public: - // constructor - layouts(analysis::axes *axes, analysis::align *align, size_t num_warps, target* tgt); - - // accessors - unsigned layout_of(ir::value *value) const { return groups_.at(value); } - bool has(ir::value* value) const { return groups_.find(value) != groups_.end(); } - bool has(size_t id) { return layouts_.find(id) != layouts_.end(); } - const std::vector& values_of(unsigned id) const { return values_.at(id); } - size_t num_layouts() const { return values_.size();} - data_layout* get(size_t id) { return layouts_.at(id); } - data_layout* get(ir::value *v) { return get(layout_of(v));} - std::map &get_all() { return layouts_; } - bool has_tmp(ir::value* i) { return tmp_.find(i) != tmp_.end(); } - int tmp(ir::value* i) { return tmp_.at(i);} - int has_tmp_index(ir::value* i) { return tmp_index_.find(i) != tmp_index_.end(); } - int tmp_index(ir::value* i) { return tmp_index_.at(i);} - void copy(ir::value* dst, ir::value* src) { groups_[dst] = groups_[src]; 
} - - // layout checkers - bool is_scanline(ir::instruction* i); - - bool is_coalesced_scanline(ir::instruction* i); - - bool is_mma(ir::instruction* i); - - bool is_a100_mma(ir::instruction* i); - - // execution - void run(ir::module &mod); - -private: - analysis::axes* axes_; - analysis::align* align_; - size_t num_warps_; - target* tgt_; - tools::graph graph_; - std::map groups_; - std::map> values_; - std::map layouts_; - std::map tmp_; - std::map tmp_index_; -}; - -} -} - -} - -#endif diff --git a/include/triton/codegen/analysis/liveness.h b/include/triton/codegen/analysis/liveness.h deleted file mode 100644 index 12232b6548ee..000000000000 --- a/include/triton/codegen/analysis/liveness.h +++ /dev/null @@ -1,69 +0,0 @@ -#ifndef TDL_INCLUDE_IR_CODEGEN_LIVENESS_H -#define TDL_INCLUDE_IR_CODEGEN_LIVENESS_H - -#include "triton/codegen/analysis/layout.h" -#include "triton/tools/graph.h" - -#include "llvm/ADT/MapVector.h" - -#include -#include - -namespace triton{ - -namespace ir{ - class value; - class phi_node; - class function; - class module; - class instruction; -} - -namespace codegen{ -namespace analysis{ - -typedef unsigned slot_index; - -class tiles; -class layouts; -class data_layout; - -struct segment { - slot_index start; - slot_index end; - - bool contains(slot_index idx) const { - return start <= idx && idx < end; - } - - bool intersect(const segment &Other){ - return contains(Other.start) || Other.contains(start); - } -}; - - -class liveness { -private: - typedef llvm::MapVector intervals_map_t; - -public: - // constructor - liveness(layouts *l): layouts_(l){ } - // accessors - const intervals_map_t& get() const { return intervals_; } - segment get(shared_layout* v) const { return intervals_.lookup(v); } - // run - void run(ir::module &mod); - -private: - // analysis - layouts *layouts_; - intervals_map_t intervals_; -}; - -} -} -} - - -#endif diff --git a/include/triton/codegen/analysis/swizzle.h b/include/triton/codegen/analysis/swizzle.h deleted file mode 100644 index 6f2833a6851b..000000000000 --- a/include/triton/codegen/analysis/swizzle.h +++ /dev/null @@ -1,43 +0,0 @@ -#ifndef TRITON_INCLUDE_IR_CODEGEN_SWIZZLE_H -#define TRITON_INCLUDE_IR_CODEGEN_SWIZZLE_H - -#include - -namespace triton{ - -namespace ir{ - class module; -} - -namespace codegen{ -class target; - -namespace analysis{ - -class layouts; -class data_layout; - -class swizzle { -public: - // constructor - swizzle(layouts *l, target* tgt): layouts_(l), tgt_(tgt){ } - // accessors - int get_per_phase(data_layout* layout) { return per_phase_.at(layout); } - int get_max_phase(data_layout* layout) { return max_phase_.at(layout); } - int get_vec (data_layout* layout) { return vec_.at(layout); } - // run - void run(ir::module &mod); -private: - layouts* layouts_; - target* tgt_; - std::map per_phase_; - std::map max_phase_; - std::map vec_; -}; - -} -} -} - - -#endif diff --git a/include/triton/codegen/extern_lib.h b/include/triton/codegen/extern_lib.h deleted file mode 100644 index 02e991407469..000000000000 --- a/include/triton/codegen/extern_lib.h +++ /dev/null @@ -1,90 +0,0 @@ -#ifndef _TRITON_CODE_GEN_EXTERN_LIB_H_ -#define _TRITON_CODE_GEN_EXTERN_LIB_H_ - -#include -#include -#include - -#include "llvm/IR/LLVMContext.h" -#include "llvm/IR/Module.h" -#include "llvm/IRReader/IRReader.h" -#include "llvm/Support/SourceMgr.h" - -namespace triton { -namespace codegen { - -/// -/// \brief ExternLib is a class that represents a library of external functions. 
-/// -class ExternLib { - public: - ExternLib(const std::string &name, const std::string &path) - : name_(name), path_(path) {} - - virtual ~ExternLib() = default; - - virtual const std::string &name() const { return name_; } - - virtual const std::string &path() const { return path_; } - - /// - /// \brief Load the library and return the module. - /// - std::unique_ptr load(llvm::LLVMContext &ctx); - - /// - /// \brief Link the module into the given module. - /// - void link(std::unique_ptr &llvm, - std::unique_ptr &mod); - - /// - /// \brief Run load, link, and opt on the module. - /// - virtual void install(llvm::LLVMContext &ctx, - std::unique_ptr &llvm) { - auto mod = load(ctx); - link(llvm, mod); - opt(ctx, llvm); - } - - /// - /// \brief Run opt on the module. - /// - virtual void opt(llvm::LLVMContext &ctx, - std::unique_ptr &llvm) = 0; - - private: - std::string name_; - std::string path_; -}; - -/// -/// \brief ExternLibMap is a map of ExternLibs from their names to their paths. -/// -typedef std::map> ExternLibMap; - -/// -/// \brief Concrete class for NVIDIA's libdevice library. -/// -class LibDevice final : public ExternLib { - public: - LibDevice(const std::string &name, const std::string &path) - : ExternLib(name, path) {} - - virtual ~LibDevice() = default; - - virtual void opt(llvm::LLVMContext &ctx, - std::unique_ptr &llvm) override; -}; - -/// -/// \brief Create an ExternLib instance based on the name and path. -/// -std::unique_ptr create_extern_lib(const std::string &lib_name, - const std::string &lib_path); - -} // namespace codegen -} // namespace triton - -#endif diff --git a/include/triton/codegen/pass.h b/include/triton/codegen/pass.h deleted file mode 100644 index 95b00b8070e5..000000000000 --- a/include/triton/codegen/pass.h +++ /dev/null @@ -1,41 +0,0 @@ -#ifndef _TRITON_CODEGEN_PASS_H_ -#define _TRITON_CODEGEN_PASS_H_ - - -#include -#include "extern_lib.h" - -namespace llvm{ - class Module; - class LLVMContext; -} - -namespace triton{ - -namespace codegen { - class target; -} - -namespace ir{ - class module; -} -namespace driver{ - class device; - class module; - class kernel; -} -} - -namespace triton{ -namespace codegen{ - -// TODO: -// There should be a proper pass manager there! 
-std::unique_ptr add_passes_to_emit_bin( - ir::module &ir, llvm::LLVMContext &ctx, codegen::target *target, - int num_warps, int num_stages, int &shared_static, - const ExternLibMap &extern_libs); -} -} - -#endif diff --git a/include/triton/codegen/selection/generator.h b/include/triton/codegen/selection/generator.h deleted file mode 100644 index cf7dacb095e0..000000000000 --- a/include/triton/codegen/selection/generator.h +++ /dev/null @@ -1,300 +0,0 @@ -#pragma once - -#ifndef _TRITON_SELECTION_GENERATOR_H_ -#define _TRITON_SELECTION_GENERATOR_H_ - -#include "triton/ir/visitor.h" -#include "triton/ir/instructions.h" -#include "triton/codegen/analysis/layout.h" -#include "triton/codegen/extern_lib.h" -#include - -// forward -namespace llvm{ - class Type; - class Value; - class PHINode; - class BasicBlock; - class Attribute; - class Instruction; - class Constant; - class LLVMContext; - class Module; - class ConstantFolder; - class IRBuilderDefaultInserter; - template - class IRBuilder; - class ArrayType; - class Function; - class StructType; -} - -namespace triton{ - -namespace ir{ -class attribute; -class load_inst; -class store_inst; -} - -namespace codegen{ - -// forward -namespace analysis{ -class liveness; -class tiles; -class align; -class allocation; -class cts; -class axes; -class layouts; -class swizzle; -} -// typedef -typedef llvm::IRBuilder Builder; -typedef llvm::LLVMContext LLVMContext; -typedef llvm::Type Type; -typedef llvm::Value Value; -typedef llvm::Attribute Attribute; -typedef llvm::BasicBlock BasicBlock; -typedef llvm::Module Module; -typedef llvm::Instruction Instruction; -typedef llvm::Constant Constant; -typedef llvm::ArrayType ArrayType; -typedef llvm::Function Function; -typedef std::vector indices_t; -class target; - -} -} - -namespace triton{ -namespace codegen{ - -struct distributed_axis { - int contiguous; - std::vector values; - Value* thread_id; -}; - -class adder{ -public: - adder(Builder** builder): builder_(builder) { } - Value* operator()(Value* x, Value* y, const std::string& name = ""); - -private: - Builder** builder_; -}; - -class multiplier{ -public: - multiplier(Builder** builder): builder_(builder) { } - Value* operator()(Value* x, Value* y, const std::string& name = ""); -private: - Builder** builder_; -}; - -class geper{ -public: - geper(Builder** builder): builder_(builder) { } - Value* operator()(Value *ptr, Value* off, const std::string& name = ""); - Value* operator()(Type* ty, Value*ptr, std::vector vals, const std::string& name = ""); - -private: - Builder** builder_; -}; - -class generator: public ir::visitor, public analysis::layout_visitor { -private: - void init_idx(ir::value *x); - Instruction* add_barrier(); - Value* shared_off(const std::vector& shapes, const std::vector& order, indices_t idx); - void finalize_shared_layout(analysis::shared_layout*); - void finalize_function(ir::function*); - void finalize_phi_node(ir::phi_node*); - -private: - Type *cvt(ir::type *ty); - llvm::Attribute cvt(ir::attribute attr); - void packed_type(ir::value* i); - void forward_declare(ir::function* fn); - Value *cast_shared_layout_ptr(analysis::data_layout *layout, Type *ty); - - private: - typedef std::function &acc, std::function load_value_fn, - std::function load_index_fn, bool is_first)> - acc_fn_t; - - public: - generator(analysis::axes *a_axes, - analysis::layouts *layouts, - analysis::align *alignment, - analysis::allocation *alloc, - analysis::swizzle *swizzle, - target *tgt, - unsigned num_warps); - - void visit_value(ir::value* v); - void 
visit_call_inst(ir::call_inst*); - void visit_launch_inst(ir::launch_inst *); - void visit_phi_node(ir::phi_node*); - void visit_binary_operator(ir::binary_operator*); - void visit_getelementptr_inst(ir::getelementptr_inst*); - void visit_icmp_inst(ir::icmp_inst*); - void visit_fcmp_inst(ir::fcmp_inst*); - std::tuple fp8x4_to_fp32x4(Value *in0, Value *in1, Value *in2, Value *in3); - std::tuple fp32x4_to_fp8x4(Value *in0, Value *in1, Value *in2, Value *in3); - std::tuple fp8x4_to_fp16x4(Value *in0, Value *in1, Value *in2, Value *in3); - std::tuple fp16x4_to_fp8x4(Value *in0, Value *in1, Value *in2, Value *in3); - std::tuple fp8x4_to_bf16x4(Value *in0, Value *in1, Value *in2, Value *in3); - std::tuple bf16x4_to_fp8x4(Value *in0, Value *in1, Value *in2, Value *in3); - Value* bf16_to_fp32(Value *in0); - Value* fp32_to_bf16(Value *in0); - std::tuple int16_to_float16x8( - Value *in0, Value *scale_x512, Value *shift - ); - std::tuple int32_to_float16x8( - Value *in0, Value *scale_x512, Value *shift - ); - std::tuple int32_to_float16x4(Value *in0, Value *scale_x512, Value *shift); - std::tuple prepare_scale_shift(Value *scale, Value *shift); - void visit_dequantize_inst(ir::dequantize_inst*); - void visit_cast_inst(ir::cast_inst*); - void visit_return_inst(ir::return_inst*); - void visit_cond_branch_inst(ir::cond_branch_inst*); - void visit_uncond_branch_inst(ir::uncond_branch_inst*); - void visit_load_inst(ir::load_inst*); - void visit_unmasked_load_inst(ir::unmasked_load_inst*); - void visit_masked_load_inst(ir::masked_load_inst*); - void visit_store_inst(ir::store_inst*); - void visit_unmasked_store_inst(ir::unmasked_store_inst*); - void visit_masked_store_inst(ir::masked_store_inst*); - void visit_cat_inst(ir::cat_inst*); - void visit_extract_value_inst(ir::extract_value_inst *); - void visit_insert_value_inst(ir::insert_value_inst *); - void visit_reshape_inst(ir::reshape_inst*); - void visit_splat_inst(ir::splat_inst*); - void visit_broadcast_inst(ir::broadcast_inst*); - void visit_downcast_inst(ir::downcast_inst*); - void visit_exp_inst(ir::exp_inst*); - void visit_cos_inst(ir::cos_inst*); - void visit_umulhi_inst(ir::umulhi_inst* x); - void visit_sin_inst(ir::sin_inst*); - void visit_log_inst(ir::log_inst*); - void visit_get_program_id_inst(ir::get_program_id_inst*); - void visit_get_num_programs_inst(ir::get_num_programs_inst*); - void visit_atomic_cas_inst(ir::atomic_cas_inst*); - void visit_atomic_rmw_inst(ir::atomic_rmw_inst*); - void visit_mma884(ir::dot_inst*, ir::value *A, ir::value *B, ir::value *D, unsigned NK); - void visit_mma16816(ir::dot_inst*, ir::value *A, ir::value *B, ir::value *D, unsigned NK); - void visit_fmadot(ir::dot_inst*, ir::value *A, ir::value *B, ir::value *D, unsigned NK, Type *c_ty, Function *f_mul_add); - void visit_dot_inst(ir::dot_inst*); - void visit_trans_inst(ir::trans_inst*); - void visit_sqrt_inst(ir::sqrt_inst*); - Value* shfl_sync(Value* acc, int32_t i); - void visit_reducend_inst_fast(ir::reduce_inst* x, acc_fn_t do_acc, Value *neutral); - void visit_reducend_inst(ir::reduce_inst* x, acc_fn_t do_acc, Value *neutral); - void visit_reduce_inst(ir::reduce_inst*); - void visit_select_inst(ir::select_inst*); - void visit_layout_convert(ir::value *out, ir::value *in); - void visit_cvt_layout_inst(ir::cvt_layout_inst*); - void visit_masked_load_async_inst(ir::masked_load_async_inst*); - void visit_copy_to_shared_inst(ir::copy_to_shared_inst*); - void visit_copy_from_shared_inst(ir::copy_from_shared_inst*); - void visit_barrier_inst(ir::barrier_inst*); - 
void visit_prefetch_s_inst(ir::prefetch_s_inst*); - void visit_async_wait_inst(ir::async_wait_inst*); -// void visit_make_range_dyn(ir::make_range_dyn*); - void visit_make_range(ir::make_range*); - void visit_clock_inst(ir::clock_inst*); - void visit_globaltimer_inst(ir::globaltimer_inst*); - void visit_extern_elementwise_inst(ir::extern_elementwise_inst*); -// void visit_make_range_sta(ir::make_range_sta*); - void visit_undef_value(ir::undef_value*); - void visit_constant_int(ir::constant_int*); - void visit_constant_fp(ir::constant_fp*); - void visit_alloc_const(ir::alloc_const*); - void visit_function(ir::function*); - void visit_basic_block(ir::basic_block*); - void visit_argument(ir::argument*); - void visit(ir::module &, llvm::Module &); - - // layouts - void visit_layout_mma(analysis::mma_layout*); - void visit_layout_scanline(analysis::scanline_layout*); - void visit_layout_shared(analysis::shared_layout*); - - // Add a new external library based on given name and path if it doesn't exist - void add_extern_lib(const std::string &lib_name, const std::string &lib_path); - - // Get all external libraries - const ExternLibMap &get_extern_lib_map() { - return extern_lib_map_; - } - - private: - LLVMContext *ctx_; - Builder* builder_; - Module *mod_; - - std::map> extern_lib_map_; - - analysis::axes *a_axes_; - analysis::swizzle *swizzle_; - std::map axes_; - target *tgt_; - analysis::layouts *layouts_; - analysis::align *alignment_; - analysis::allocation *alloc_; - Value *shmem_; - std::set seen_; - - unsigned num_warps_; - - std::map offset_a_m_; - std::map offset_a_k_; - std::map offset_b_k_; - std::map offset_b_n_; - - /// layout -> base ptr - std::map shared_ptr_; - std::map shared_pre_ptr_; - std::map shared_next_ptr_; - /// offset for double-buffered layout - std::map shared_off_; - - /// Base shmem pointer of ir value - std::map shmems_; - std::map shoffs_; - std::map> idxs_; - std::map> vals_; - /// idx for multi-stage pipeline - std::map read_smem_idx_; - std::map write_smem_idx_; - - /// triton bb -> llvm bb - std::map bbs_; - std::map> ords_; - std::map fns_; - - // helper for creating llvm values - adder add; - multiplier mul; - geper gep; - - /// PHI nodes - std::vector> lazy_phi_incs_; - - /// Record prefetch instrs that needs to be moved - std::map> prefetch_latch_to_bb_; - - // Eviction policies - std::map policies_; -}; - -} -} - -#endif diff --git a/include/triton/codegen/target.h b/include/triton/codegen/target.h deleted file mode 100644 index 96e4d5c31dde..000000000000 --- a/include/triton/codegen/target.h +++ /dev/null @@ -1,105 +0,0 @@ -#ifndef TDL_INCLUDE_IR_CODEGEN_TARGET_H -#define TDL_INCLUDE_IR_CODEGEN_TARGET_H - -namespace llvm{ - class Type; - class Value; - class Instruction; - class Constant; - class LLVMContext; - class Module; - class ConstantFolder; - class IRBuilderDefaultInserter; - template - class IRBuilder; - class ArrayType; - class Function; -} - -// typedefs -namespace triton{ -namespace codegen{ - typedef llvm::IRBuilder Builder; - typedef llvm::LLVMContext LLVMContext; - typedef llvm::Type Type; - typedef llvm::Value Value; - typedef llvm::Module Module; - typedef llvm::Instruction Instruction; - typedef llvm::Constant Constant; - typedef llvm::ArrayType ArrayType; - typedef llvm::Function Function; -} -} - -namespace triton{ -namespace codegen{ - -class nvidia_cu_target; - -class target { -public: - target(bool is_gpu): is_gpu_(is_gpu){} - virtual ~target() {} - virtual void set_kernel(Builder& builder, LLVMContext &ctx, Module *module, 
Function* fn) = 0; - virtual Instruction* add_barrier(Module *module, Builder& builder) = 0; - virtual Instruction* add_memfence(Module *module, Builder& builder) = 0; - virtual Value* get_global_offset(Module *module, Builder& builder, unsigned stride, unsigned ax) = 0; - virtual Value* get_local_id(Module *module, Builder& builder, unsigned ax) = 0; - virtual Value* get_block_id(Module *module, Builder& builder, unsigned ax) = 0; - virtual Value* get_num_blocks(Module *module, Builder& builder, unsigned ax) = 0; - virtual unsigned guaranteed_alignment() = 0; - nvidia_cu_target* as_nvidia(); - bool is_gpu() const; - -private: - bool is_gpu_; -}; - -class amd_cl_target: public target { -public: - amd_cl_target(): target(true){} - void set_kernel(Builder& builder, LLVMContext &ctx, Module *module, Function* fn); - Instruction* add_barrier(Module *module, Builder& builder); - Instruction* add_memfence(Module *module, Builder& builder); - Value* get_global_offset(Module *module, Builder& builder, unsigned stride, unsigned ax); - Value* get_local_id(Module *module, Builder& builder, unsigned ax); - Value* get_block_id(Module *module, Builder& builder, unsigned ax); - Value* get_num_blocks(Module *module, Builder& builder, unsigned ax); - unsigned guaranteed_alignment() { return 16; } -}; - -class nvidia_cu_target: public target { -public: - nvidia_cu_target(int sm): target(true), sm_(sm){} - void set_kernel(Builder& builder, LLVMContext &ctx, Module *module, Function* fn); - Instruction* add_barrier(Module *module, Builder& builder); - Instruction* add_memfence(Module *module, Builder& builder); - Value* get_global_offset(Module *module, Builder& builder, unsigned stride, unsigned ax); - Value* get_local_id(Module *module, Builder& builder, unsigned ax); - Value* get_block_id(Module *module, Builder& builder, unsigned ax); - Value* get_num_blocks(Module *module, Builder& builder, unsigned ax); - int sm() { return sm_; } - unsigned guaranteed_alignment() { return 16; } - -private: - int sm_; -}; - -class cpu_target: public target { -public: - cpu_target(): target(false){} - void set_kernel(Builder& builder, LLVMContext &ctx, Module *module, Function* fn); - Instruction* add_barrier(Module *module, Builder& builder); - Instruction* add_memfence(Module *module, Builder& builder); - Value* get_global_offset(Module *module, Builder& builder, unsigned stride, unsigned ax); - Value* get_local_id(Module *module, Builder& builder, unsigned ax); - Value* get_block_id(Module *module, Builder& builder, unsigned ax); - Value* get_num_blocks(Module *module, Builder& builder, unsigned ax); - unsigned guaranteed_alignment() { return 1; } -}; - -} -} - -#endif diff --git a/include/triton/codegen/transform/coalesce.h b/include/triton/codegen/transform/coalesce.h deleted file mode 100644 index e16ffe5fea8f..000000000000 --- a/include/triton/codegen/transform/coalesce.h +++ /dev/null @@ -1,49 +0,0 @@ -#ifndef TDL_INCLUDE_CODEGEN_OPTIMIZE_REORDER_H -#define TDL_INCLUDE_CODEGEN_OPTIMIZE_REORDER_H - -#include -#include -#include - -namespace triton { - -namespace ir { - class module; - class value; - class io_inst; - class instruction; - class builder; -} - -namespace codegen{ - -namespace analysis{ - class align; - class layouts; - class cts; -} - -namespace transform{ - -class coalesce { -private: - void extract_io_use(ir::value *v, std::set& result); - void extract_ld(ir::io_inst *i, std::map > &result); - ir::value* rematerialize(ir::value *v, ir::builder& builder, std::map& seen); - -public: - 
coalesce(analysis::align* align, triton::codegen::analysis::layouts *layouts, bool has_sm80); - triton::ir::value *simplify(ir::instruction* i, triton::ir::builder &builder); - void run(ir::module &mod); - -private: - bool has_sm80_; - analysis::align* align_; - analysis::layouts* layout_; -}; - -} -} -} - -#endif diff --git a/include/triton/codegen/transform/cts.h b/include/triton/codegen/transform/cts.h deleted file mode 100644 index 30b421b52355..000000000000 --- a/include/triton/codegen/transform/cts.h +++ /dev/null @@ -1,44 +0,0 @@ -#ifndef TDL_INCLUDE_CODEGEN_BUFFER_INFO_PASS_H -#define TDL_INCLUDE_CODEGEN_BUFFER_INFO_PASS_H - -#include -#include - -namespace triton { - -namespace ir { - class module; - class value; - class phi_node; - class instruction; - class builder; -} - -namespace codegen{ - -namespace analysis{ -class layouts; -} - -namespace transform{ - -class cts { -private: - bool is_shmem_op(ir::instruction* i, int op); - bool is_shmem_res(ir::value* i); -void add_copy(ir::instruction *parent, ir::value *x, ir::builder &builder, bool to_shared, std::map& copies); - -public: - cts(analysis::layouts* layouts, bool has_sm80 = false): layouts_(layouts), has_sm80_(has_sm80) {} - void run(ir::module &mod); - -private: - bool has_sm80_; - analysis::layouts* layouts_; -}; - -} -} -} - -#endif \ No newline at end of file diff --git a/include/triton/codegen/transform/dce.h b/include/triton/codegen/transform/dce.h deleted file mode 100644 index 8bed0afef4f6..000000000000 --- a/include/triton/codegen/transform/dce.h +++ /dev/null @@ -1,24 +0,0 @@ -#ifndef TDL_INCLUDE_CODEGEN_OPTIMIZE_CSE_H -#define TDL_INCLUDE_CODEGEN_OPTIMIZE_CSE_H - - -namespace triton { - -namespace ir { - class module; -} - -namespace codegen{ -namespace transform{ - -class dce { -public: - dce() {} - void run(ir::module &mod); -}; - -} -} -} - -#endif diff --git a/include/triton/codegen/transform/disassociate.h b/include/triton/codegen/transform/disassociate.h deleted file mode 100644 index f2363f3fe2f6..000000000000 --- a/include/triton/codegen/transform/disassociate.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef _TRITON_SELECTION_TRANSFORM_DISASSOCIATE_H_ -#define _TRITON_SELECTION_TRANSFORM_DISASSOCIATE_H_ - - -namespace triton { -namespace ir { - class module; -} - -namespace codegen{ -namespace transform{ - -class disassociate { -public: - void run(ir::module &mod); -}; - -} -} -} - -#endif diff --git a/include/triton/codegen/transform/inline.h b/include/triton/codegen/transform/inline.h deleted file mode 100644 index c79079b61223..000000000000 --- a/include/triton/codegen/transform/inline.h +++ /dev/null @@ -1,31 +0,0 @@ -#pragma once - -#include - -namespace triton { - -namespace ir { - class module; - class function; - class call_inst; - class builder; -} - -namespace codegen{ -namespace transform{ - -struct fncmp { - bool operator()(ir::function* x, ir::function* y) const; -}; - -class inliner { -public: - inliner() {} - void do_inline(ir::function* fn, ir::call_inst* callsite, ir::builder& builder, std::list& callsites); - void run(ir::module &mod); -}; - - -} -} -} diff --git a/include/triton/codegen/transform/membar.h b/include/triton/codegen/transform/membar.h deleted file mode 100644 index 21145a4fe769..000000000000 --- a/include/triton/codegen/transform/membar.h +++ /dev/null @@ -1,72 +0,0 @@ -#ifndef TDL_INCLUDE_CODEGEN_BARRIERS_H -#define TDL_INCLUDE_CODEGEN_BARRIERS_H - -#include -#include -#include -#include -#include "triton/codegen/target.h" - -namespace triton { - -namespace ir { - class module; 
- class basic_block; - class instruction; - class masked_load_async_inst; - class value; - class builder; -} - -namespace codegen{ - -namespace analysis{ - -class allocation; -class liveness; -class layouts; -class cts; -class shared_layout; - -} - -namespace transform{ - -class prefetch; - -class membar { -private: - typedef std::pair interval_t; - typedef std::set val_set_t; - typedef std::vector val_vec_t; - -private: - bool intersect(const val_set_t &X, const val_set_t &Y); - bool check_safe_war(ir::instruction* i); - int group_of(triton::ir::value *i, std::vector &async_write); - bool intersect_with(analysis::shared_layout* a_layout, analysis::shared_layout* b_layout); - val_set_t intersect_with(const val_set_t& as, const val_set_t& bs); - void transfer(ir::basic_block *block, val_vec_t &async_write, val_set_t &sync_write, val_set_t &sync_read, - std::set &safe_war, bool &inserted, ir::builder &builder); - -public: - membar(analysis::liveness *liveness, analysis::layouts *layouts, analysis::allocation *alloc, - transform::prefetch *prefetch, target* tgt): - liveness_(liveness), layouts_(layouts), alloc_(alloc), prefetch_(prefetch), tgt_(tgt) {} - void run(ir::module &mod); - -private: - analysis::liveness *liveness_; - analysis::layouts *layouts_; - analysis::allocation *alloc_; - transform::prefetch *prefetch_; - - target* tgt_; -}; - - -} -} -} - -#endif diff --git a/include/triton/codegen/transform/peephole.h b/include/triton/codegen/transform/peephole.h deleted file mode 100644 index 5b84a813bcea..000000000000 --- a/include/triton/codegen/transform/peephole.h +++ /dev/null @@ -1,56 +0,0 @@ -#ifndef TDL_INCLUDE_CODEGEN_OPTIMIZE_TRANS_H -#define TDL_INCLUDE_CODEGEN_OPTIMIZE_TRANS_H - -#include "triton/codegen/target.h" - -namespace triton { - -namespace ir { - class module; - class value; - class instruction; - class trans_inst; - class builder; - class constant_int; - class dot_inst; -} - -namespace codegen{ -namespace analysis{ -class layouts; -} - -namespace transform{ - -class peephole { -private: -// bool rewrite_cts_cfs(ir::instruction *value, ir::builder &builder); - bool rewrite_trans_phi(ir::instruction* value, ir::builder &builder); - bool rewrite_dot_fp32(ir::dot_inst *dot, ir::builder& builder, bool trans_a, bool trans_b, ir::value *A, ir::value *B, ir::value *D); - bool rewrite_dot_hmma(ir::dot_inst *dot, ir::builder& builder, bool trans_a, bool trans_b, ir::value *A, ir::value *B, ir::value *D); - bool rewrite_dot(ir::instruction *value, ir::builder& builder); - bool rewrite_mult(ir::instruction *value, ir::builder& builder); - bool rewrite_insert_extract(ir::instruction *value, ir::builder& builder); - - - bool rewrite_unit_red(ir::instruction *value, ir::builder& builder); - bool rewrite_gep_ptr_min_off_plus_off(ir::instruction *value, ir::builder& builder); - bool rewrite_select_masked_load(ir::instruction *value, ir::builder& builder); - bool rewrite_load_to_shared(ir::instruction *value, ir::builder& builder); - bool rewrite_cvt_layout(ir::instruction *value, ir::builder& builder); - -public: - peephole(target* tgt, analysis::layouts* layouts): tgt_(tgt), layouts_(layouts) {} - void run(ir::module &mod); - -private: - target* tgt_; - analysis::layouts* layouts_; -}; - - -} -} -} - -#endif diff --git a/include/triton/codegen/transform/pipeline.h b/include/triton/codegen/transform/pipeline.h deleted file mode 100644 index 35472de040f3..000000000000 --- a/include/triton/codegen/transform/pipeline.h +++ /dev/null @@ -1,30 +0,0 @@ -#ifndef 
TRITON_INCLUDE_IR_CODEGEN_PIPELINE_H -#define TRITON_INCLUDE_IR_CODEGEN_PIPELINE_H - -// forward declaration -namespace triton { -namespace ir { -class module; -} -} // namespace triton - -namespace triton { -namespace codegen { -namespace transform { - -class pipeline { -public: - pipeline(bool has_copy_async, int num_stages) - : has_copy_async_(has_copy_async), num_stages_(num_stages) {} - void run(ir::module &module); - -private: - bool has_copy_async_; - int num_stages_; -}; - -} // namespace transform -} // namespace codegen -} // namespace triton - -#endif diff --git a/include/triton/codegen/transform/prefetch.h b/include/triton/codegen/transform/prefetch.h deleted file mode 100644 index 6843b54633fc..000000000000 --- a/include/triton/codegen/transform/prefetch.h +++ /dev/null @@ -1,27 +0,0 @@ -#ifndef TRITON_INCLUDE_TRITON_CODEGEN_TRANSFORM_PREFETCH_H -#define TRITON_INCLUDE_TRITON_CODEGEN_TRANSFORM_PREFETCH_H - -#include - -// forward dclaration -namespace triton::ir{ -class module; -class value; -} - -namespace triton::codegen { -class target; -} - -namespace triton::codegen::transform { -class prefetch { - target* tgt_; - std::set prefetched_vals_; -public: - prefetch(target *tgt) : tgt_(tgt) {} - void run(ir::module &module); - bool is_prefetched(ir::value* v) { return prefetched_vals_.find(v) != prefetched_vals_.end(); } -}; -} - -#endif \ No newline at end of file diff --git a/include/triton/codegen/transform/reorder.h b/include/triton/codegen/transform/reorder.h deleted file mode 100644 index 3b48a330ff5c..000000000000 --- a/include/triton/codegen/transform/reorder.h +++ /dev/null @@ -1,26 +0,0 @@ -#ifndef TRITON_INCLUDE_IR_CODEGEN_REORDER_H -#define TRITON_INCLUDE_IR_CODEGEN_REORDER_H - -namespace triton { - -// forward declaration -namespace ir { -class module; -} - -namespace codegen{ - -namespace transform{ - -class reorder { -public: - void run(ir::module& module); -}; - -} - -} - -} - -#endif diff --git a/include/triton/driver/dispatch.h b/include/triton/driver/dispatch.h deleted file mode 100755 index 2384b4cba192..000000000000 --- a/include/triton/driver/dispatch.h +++ /dev/null @@ -1,318 +0,0 @@ -#pragma once - -#ifndef _TRITON_DRIVER_DISPATCH_H_ -#define _TRITON_DRIVER_DISPATCH_H_ - -#include -#include - -//CUDA Backend -#include "triton/external/CUDA/cuda.h" -#include "triton/external/CUDA/nvml.h" - -//// HIP backend -//#define __HIP_PLATFORM_AMD__ -#include "triton/external/hip.h" - -//Exceptions -#include -#include - -namespace llvm { -class PassRegistry; -class Module; -} - -namespace triton -{ -namespace driver -{ - -class cu_context; - -template void check(T){} -void check(CUresult err); -void check(hipError_t err); - -class dispatch -{ -protected: - template - struct return_type; - - template - struct return_type - { typedef R type; }; - - typedef bool (*f_init_t)(); - - template - static typename return_type::type f_impl(void*& lib_h, FunPtrT, void*& cache, const char * name, Args... 
args) - { - initializer(); - if(cache == nullptr){ - cache = dlsym(lib_h, name); - if(cache == 0) - throw std::runtime_error("dlsym unable to load function"); - } - FunPtrT fptr; - *reinterpret_cast(&fptr) = cache; - typename return_type::type res = (*fptr)(args...); - check(res); - return res; - } - -public: - static void release(); - // Nvidia - static bool nvmlinit(); - static bool cuinit(); - // AMD - static bool hipinit(); - - /* ------------------- * - * CUDA - * ------------------- */ - // context management - static CUresult cuInit(unsigned int Flags); - static CUresult cuCtxDestroy_v2(CUcontext ctx); - static CUresult cuCtxCreate_v2(CUcontext *pctx, unsigned int flags, CUdevice dev); - static CUresult cuCtxPushCurrent_v2(CUcontext ctx); - static CUresult cuCtxPopCurrent_v2(CUcontext *pctx); - static CUresult cuCtxGetDevice(CUdevice* result); - static CUresult cuCtxEnablePeerAccess(CUcontext peerContext, unsigned int flags); - static CUresult cuDriverGetVersion(int *driverVersion); - // device management - static CUresult cuDeviceGet(CUdevice *device, int ordinal); - static CUresult cuDeviceGetName(char *name, int len, CUdevice dev); - static CUresult cuDeviceGetPCIBusId(char *id, int len, CUdevice dev); - static CUresult cuDeviceGetAttribute(int *pi, CUdevice_attribute attrib, CUdevice dev); - static CUresult cuDeviceGetCount(int *count); - // link management - static CUresult cuLinkAddFile_v2(CUlinkState state, CUjitInputType type, const char *path, unsigned int numOptions, CUjit_option *options, void **optionValues); - static CUresult cuLinkAddData_v2(CUlinkState state, CUjitInputType type, void* data, size_t size, const char* name, unsigned int numOptions, CUjit_option* options, void** optionValues); - static CUresult cuLinkCreate_v2(unsigned int numOptions, CUjit_option* options, void** optionValues, CUlinkState* stateOut); - static CUresult cuLinkComplete(CUlinkState state, void** cubinOut, size_t* sizeOut); - static CUresult cuLinkDestroy(CUlinkState state); - // module management - static CUresult cuModuleGetGlobal_v2(CUdeviceptr *dptr, size_t* bytes, CUmodule hmod, const char *name); - static CUresult cuModuleLoad(CUmodule *module, const char *fname); - static CUresult cuModuleLoadData(CUmodule* module, const void* image); - static CUresult cuModuleUnload(CUmodule hmod); - static CUresult cuModuleLoadDataEx(CUmodule *module, const void *image, unsigned int numOptions, CUjit_option *options, void **optionValues); - static CUresult cuModuleGetFunction(CUfunction *hfunc, CUmodule hmod, const char *name); - // stream management - static CUresult cuStreamCreate(CUstream *phStream, unsigned int Flags); - static CUresult cuStreamSynchronize(CUstream hStream); - static CUresult cuStreamGetCtx(CUstream hStream, CUcontext* pctx); - static CUresult cuStreamDestroy_v2(CUstream hStream); - static CUresult cuLaunchKernel(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void **kernelParams, void **extra); - // function management - static CUresult cuFuncGetAttribute(int* pi, CUfunction_attribute attrib, CUfunction hfunc); - static CUresult cuFuncSetAttribute(CUfunction hfunc, CUfunction_attribute attrib, int value); - static CUresult cuFuncSetCacheConfig(CUfunction hfunc, CUfunc_cache config); - // memory management - static CUresult cuMemAlloc_v2(CUdeviceptr *dptr, size_t bytesize); - static CUresult cuPointerGetAttribute(void * data, 
CUpointer_attribute attribute, CUdeviceptr ptr); - static CUresult cuMemsetD8Async(CUdeviceptr dst, unsigned char x, size_t N, CUstream stream); - static CUresult cuMemcpyDtoH_v2(void *dstHost, CUdeviceptr srcDevice, size_t ByteCount); - static CUresult cuMemFree_v2(CUdeviceptr dptr); - static CUresult cuMemcpyDtoHAsync_v2(void *dstHost, CUdeviceptr srcDevice, size_t ByteCount, CUstream hStream); - static CUresult cuMemcpyHtoDAsync_v2(CUdeviceptr dstDevice, const void *srcHost, size_t ByteCount, CUstream hStream); - static CUresult cuMemcpyHtoD_v2(CUdeviceptr dstDevice, const void *srcHost, size_t ByteCount); - // event management - static CUresult cuEventCreate(CUevent *phEvent, unsigned int Flags); - static CUresult cuEventElapsedTime(float *pMilliseconds, CUevent hStart, CUevent hEnd); - static CUresult cuEventRecord(CUevent hEvent, CUstream hStream); - static CUresult cuEventDestroy_v2(CUevent hEvent); - - - /* ------------------- * - * NVML - * ------------------- */ - static nvmlReturn_t nvmlDeviceGetHandleByPciBusId_v2( const char* pciBusId, nvmlDevice_t* device); - static nvmlReturn_t nvmlDeviceGetClockInfo(nvmlDevice_t device, nvmlClockType_t type, unsigned int *clock); - static nvmlReturn_t nvmlDeviceGetMaxClockInfo(nvmlDevice_t device, nvmlClockType_t type, unsigned int *clock); - static nvmlReturn_t nvmlDeviceSetApplicationsClocks(nvmlDevice_t device, unsigned int mem_clock, unsigned int sm_clock); - - /* ------------------- * - * HIP - * ------------------- */ - // context management - static hipError_t hipInit(unsigned int Flags); - static hipError_t hipCtxDestroy(hipCtx_t ctx); - static hipError_t hipCtxCreate(hipCtx_t *pctx, unsigned int flags, hipDevice_t dev); - static hipError_t hipCtxPushCurrent(hipCtx_t ctx); - static hipError_t hipCtxPopCurrent(hipCtx_t *pctx); - static hipError_t hipCtxGetDevice(hipDevice_t* result); - static hipError_t hipCtxEnablePeerAccess(hipCtx_t peerContext, unsigned int flags); - static hipError_t hipDriverGetVersion(int *driverVersion); - // device management - static hipError_t hipGetDevice(hipDevice_t *device, int ordinal); - static hipError_t hipDeviceGetName(char *name, int len, hipDevice_t dev); - static hipError_t hipDeviceGetPCIBusId(char *id, int len, hipDevice_t dev); - static hipError_t hipDeviceGetAttribute(int *pi, hipDeviceAttribute_t attrib, hipDevice_t dev); - static hipError_t hipGetDeviceCount(int *count); - // module management - static hipError_t hipModuleGetGlobal(hipDeviceptr_t *dptr, size_t* bytes, hipModule_t hmod, const char *name); - static hipError_t hipModuleLoad(hipModule_t *module, const char *fname); - static hipError_t hipModuleLoadData(hipModule_t* module, const void* image); - static hipError_t hipModuleUnload(hipModule_t hmod); - static hipError_t hipModuleLoadDataEx(hipModule_t *module, const void *image, unsigned int numOptions, hipJitOption *options, void **optionValues); - static hipError_t hipModuleGetFunction(hipFunction_t *hfunc, hipModule_t hmod, const char *name); - // stream management - static hipError_t hipStreamCreate(hipStream_t *phStream, unsigned int Flags); - static hipError_t hipStreamSynchronize(hipStream_t hStream); - static hipError_t hipStreamDestroy(hipStream_t hStream); - static hipError_t hipModuleLaunchKernel(hipFunction_t f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, hipStream_t hStream, void **kernelParams, void **extra); - // function management - static 
hipError_t hipFuncGetAttributes(hipFuncAttributes* attrib, void* hfunc); - static hipError_t hipFuncSetAttribute(hipFunction_t hfunc, hipFuncAttribute attrib, int value); - static hipError_t hipFuncSetCacheConfig(hipFunction_t hfunc, hipFuncCache_t config); - // memory management - static hipError_t hipMalloc(hipDeviceptr_t *dptr, size_t bytesize); - static hipError_t hipPointerGetAttribute(void * data, CUpointer_attribute attribute, hipDeviceptr_t ptr); - static hipError_t hipMemsetD8Async(hipDeviceptr_t dst, unsigned char x, size_t N, hipStream_t stream); - static hipError_t hipMemcpyDtoH(void *dstHost, hipDeviceptr_t srcDevice, size_t ByteCount); - static hipError_t hipFree(hipDeviceptr_t dptr); - static hipError_t hipMemcpyDtoHAsync(void *dstHost, hipDeviceptr_t srcDevice, size_t ByteCount, hipStream_t hStream); - static hipError_t hipMemcpyHtoDAsync(hipDeviceptr_t dstDevice, const void *srcHost, size_t ByteCount, hipStream_t hStream); - static hipError_t hipMemcpyHtoD(hipDeviceptr_t dstDevice, const void *srcHost, size_t ByteCount); - // event management - static hipError_t hipEventCreate(hipEvent_t *phEvent, unsigned int Flags); - static hipError_t hipEventElapsedTime(float *pMilliseconds, hipEvent_t hStart, hipEvent_t hEnd); - static hipError_t hipEventRecord(hipEvent_t hEvent, hipStream_t hStream); - static hipError_t hipEventDestroy(hipEvent_t hEvent); - - - -private: - - // Libraries - static void* cuda_; - static void* nvml_; - static void* hip_; - - - /* ------------------- * - * CUDA - * ------------------- */ - // context management - static void* cuCtxGetCurrent_; - static void* cuCtxSetCurrent_; - static void* cuCtxDestroy_v2_; - static void* cuCtxCreate_v2_; - static void* cuCtxGetDevice_; - static void* cuCtxPushCurrent_v2_; - static void* cuCtxPopCurrent_v2_; - static void* cuCtxEnablePeerAccess_; - static void* cuDriverGetVersion_; - static void* cuInit_; - // device management - static void* cuDeviceGet_; - static void* cuDeviceGetName_; - static void* cuDeviceGetPCIBusId_; - static void* cuDeviceGetAttribute_; - static void* cuDeviceGetCount_; - // link management - static void* cuLinkAddFile_v2_; - static void* cuLinkAddData_v2_; - static void* cuLinkCreate_v2_; - static void* cuLinkDestroy_; - static void* cuLinkComplete_; - // module management - static void* cuModuleGetGlobal_v2_; - static void* cuModuleLoad_; - static void* cuModuleUnload_; - static void* cuModuleLoadDataEx_; - static void* cuModuleLoadData_; - static void* cuModuleGetFunction_; - // stream management - static void* cuStreamCreate_; - static void* cuStreamSynchronize_; - static void* cuStreamDestroy_v2_; - static void* cuStreamGetCtx_; - static void* cuLaunchKernel_; - // function management - static void* cuFuncGetAttribute_; - static void* cuFuncSetAttribute_; - static void* cuFuncSetCacheConfig_; - // memory management - static void* cuMemcpyDtoH_v2_; - static void* cuMemFree_v2_; - static void* cuMemcpyDtoHAsync_v2_; - static void* cuMemcpyHtoDAsync_v2_; - static void* cuMemcpyHtoD_v2_; - static void* cuMemAlloc_v2_; - static void* cuMemsetD8Async_; - static void* cuPointerGetAttribute_; - // event management - static void* cuEventCreate_; - static void* cuEventElapsedTime_; - static void* cuEventRecord_; - static void* cuEventDestroy_v2_; - - /* ------------------- * - * NVML - * ------------------- */ - static void* nvmlInit_v2_; - static void* nvmlDeviceGetHandleByPciBusId_v2_; - static void* nvmlDeviceGetClockInfo_; - static void* nvmlDeviceGetMaxClockInfo_; - static void* 
nvmlDeviceSetApplicationsClocks_; - - /* ------------------- * - * HIP - * ------------------- */ - // context management - static void* hipInit_; - static void* hipCtxDestroy_; - static void* hipCtxCreate_; - static void* hipCtxPushCurrent_; - static void* hipCtxPopCurrent_; - static void* hipCtxGetDevice_; - static void* hipCtxEnablePeerAccess_; - static void* hipDriverGetVersion_; - // device management - static void* hipGetDevice_; - static void* hipDeviceGetName_; - static void* hipDeviceGetPCIBusId_; - static void* hipDeviceGetAttribute_; - static void* hipGetDeviceCount_; - // module management - static void* hipModuleGetGlobal_; - static void* hipModuleLoad_; - static void* hipModuleLoadData_; - static void* hipModuleUnload_; - static void* hipModuleLoadDataEx_; - static void* hipModuleGetFunction_; - // stream management - static void* hipStreamCreate_; - static void* hipStreamSynchronize_; - static void* hipStreamDestroy_; - static void* hipModuleLaunchKernel_;; - // function management - static void* hipFuncGetAttributes_; - static void* hipFuncSetAttribute_; - static void* hipFuncSetCacheConfig_; - // memory management - static void* hipMalloc_; - static void* hipPointerGetAttribute_; - static void* hipMemsetD8Async_; - static void* hipMemcpyDtoH_; - static void* hipFree_; - static void* hipMemcpyDtoHAsync_; - static void* hipMemcpyHtoDAsync_; - static void* hipMemcpyHtoD_; - // event management - static void* hipEventCreate_; - static void* hipEventElapsedTime_; - static void* hipEventRecord_; - static void* hipEventDestroy_; -}; - -} -} - - -#endif diff --git a/include/triton/driver/error.h b/include/triton/driver/error.h deleted file mode 100755 index 6502b7493c39..000000000000 --- a/include/triton/driver/error.h +++ /dev/null @@ -1,220 +0,0 @@ -#pragma once - -#ifndef _TRITON_DRIVER_ERROR_H_ -#define _TRITON_DRIVER_ERROR_H_ - -#include -#include "triton/driver/dispatch.h" - - -namespace triton -{ - - namespace driver - { - - namespace exception - { - - namespace nvrtc - { - -#define TRITON_CREATE_NVRTC_EXCEPTION(name, msg) class name: public std::exception { public: const char * what() const throw(){ return "NVRTC: Error- " msg; } } - - TRITON_CREATE_NVRTC_EXCEPTION(out_of_memory ,"out of memory"); - TRITON_CREATE_NVRTC_EXCEPTION(program_creation_failure ,"program creation failure"); - TRITON_CREATE_NVRTC_EXCEPTION(invalid_input ,"invalid input"); - TRITON_CREATE_NVRTC_EXCEPTION(invalid_program ,"invalid program"); - TRITON_CREATE_NVRTC_EXCEPTION(invalid_option ,"invalid option"); - TRITON_CREATE_NVRTC_EXCEPTION(compilation ,"compilation"); - TRITON_CREATE_NVRTC_EXCEPTION(builtin_operation_failure ,"builtin operation failure"); - TRITON_CREATE_NVRTC_EXCEPTION(unknown_error ,"unknown error"); - -#undef TRITON_CREATE_NVRTC_EXCEPTION - } - - - namespace cuda - { - class base: public std::exception{}; - -#define TRITON_CREATE_CUDA_EXCEPTION(name, msg) class name: public base { public:const char * what() const throw(){ return "CUDA: Error- " msg; } } - - - TRITON_CREATE_CUDA_EXCEPTION(invalid_value ,"invalid value"); - TRITON_CREATE_CUDA_EXCEPTION(out_of_memory ,"out of memory"); - TRITON_CREATE_CUDA_EXCEPTION(not_initialized ,"not initialized"); - TRITON_CREATE_CUDA_EXCEPTION(deinitialized ,"deinitialized"); - TRITON_CREATE_CUDA_EXCEPTION(profiler_disabled ,"profiler disabled"); - TRITON_CREATE_CUDA_EXCEPTION(profiler_not_initialized ,"profiler not initialized"); - TRITON_CREATE_CUDA_EXCEPTION(profiler_already_started ,"profiler already started"); - 
TRITON_CREATE_CUDA_EXCEPTION(profiler_already_stopped ,"profiler already stopped"); - TRITON_CREATE_CUDA_EXCEPTION(no_device ,"no device"); - TRITON_CREATE_CUDA_EXCEPTION(invalid_device ,"invalid device"); - TRITON_CREATE_CUDA_EXCEPTION(invalid_image ,"invalid image"); - TRITON_CREATE_CUDA_EXCEPTION(invalid_context ,"invalid context"); - TRITON_CREATE_CUDA_EXCEPTION(context_already_current ,"context already current"); - TRITON_CREATE_CUDA_EXCEPTION(map_failed ,"map failed"); - TRITON_CREATE_CUDA_EXCEPTION(unmap_failed ,"unmap failed"); - TRITON_CREATE_CUDA_EXCEPTION(array_is_mapped ,"array is mapped"); - TRITON_CREATE_CUDA_EXCEPTION(already_mapped ,"already mapped"); - TRITON_CREATE_CUDA_EXCEPTION(no_binary_for_gpu ,"no binary for gpu"); - TRITON_CREATE_CUDA_EXCEPTION(already_acquired ,"already acquired"); - TRITON_CREATE_CUDA_EXCEPTION(not_mapped ,"not mapped"); - TRITON_CREATE_CUDA_EXCEPTION(not_mapped_as_array ,"not mapped as array"); - TRITON_CREATE_CUDA_EXCEPTION(not_mapped_as_pointer ,"not mapped as pointer"); - TRITON_CREATE_CUDA_EXCEPTION(ecc_uncorrectable ,"ecc uncorrectable"); - TRITON_CREATE_CUDA_EXCEPTION(unsupported_limit ,"unsupported limit"); - TRITON_CREATE_CUDA_EXCEPTION(context_already_in_use ,"context already in use"); - TRITON_CREATE_CUDA_EXCEPTION(peer_access_unsupported ,"peer access unsupported"); - TRITON_CREATE_CUDA_EXCEPTION(invalid_ptx ,"invalid ptx"); - TRITON_CREATE_CUDA_EXCEPTION(invalid_graphics_context ,"invalid graphics context"); - TRITON_CREATE_CUDA_EXCEPTION(invalid_source ,"invalid source"); - TRITON_CREATE_CUDA_EXCEPTION(file_not_found ,"file not found"); - TRITON_CREATE_CUDA_EXCEPTION(shared_object_symbol_not_found ,"shared object symbol not found"); - TRITON_CREATE_CUDA_EXCEPTION(shared_object_init_failed ,"shared object init failed"); - TRITON_CREATE_CUDA_EXCEPTION(operating_system ,"operating system"); - TRITON_CREATE_CUDA_EXCEPTION(invalid_handle ,"invalid handle"); - TRITON_CREATE_CUDA_EXCEPTION(not_found ,"not found"); - TRITON_CREATE_CUDA_EXCEPTION(not_ready ,"not ready"); - TRITON_CREATE_CUDA_EXCEPTION(illegal_address ,"illegal address"); - TRITON_CREATE_CUDA_EXCEPTION(launch_out_of_resources ,"launch out of resources"); - TRITON_CREATE_CUDA_EXCEPTION(launch_timeout ,"launch timeout"); - TRITON_CREATE_CUDA_EXCEPTION(launch_incompatible_texturing ,"launch incompatible texturing"); - TRITON_CREATE_CUDA_EXCEPTION(peer_access_already_enabled ,"peer access already enabled"); - TRITON_CREATE_CUDA_EXCEPTION(peer_access_not_enabled ,"peer access not enabled"); - TRITON_CREATE_CUDA_EXCEPTION(primary_context_active ,"primary context active"); - TRITON_CREATE_CUDA_EXCEPTION(context_is_destroyed ,"context is destroyed"); - TRITON_CREATE_CUDA_EXCEPTION(assert_error ,"assert"); - TRITON_CREATE_CUDA_EXCEPTION(too_many_peers ,"too many peers"); - TRITON_CREATE_CUDA_EXCEPTION(host_memory_already_registered ,"host memory already registered"); - TRITON_CREATE_CUDA_EXCEPTION(host_memory_not_registered ,"hot memory not registered"); - TRITON_CREATE_CUDA_EXCEPTION(hardware_stack_error ,"hardware stack error"); - TRITON_CREATE_CUDA_EXCEPTION(illegal_instruction ,"illegal instruction"); - TRITON_CREATE_CUDA_EXCEPTION(misaligned_address ,"misaligned address"); - TRITON_CREATE_CUDA_EXCEPTION(invalid_address_space ,"invalid address space"); - TRITON_CREATE_CUDA_EXCEPTION(invalid_pc ,"invalid pc"); - TRITON_CREATE_CUDA_EXCEPTION(launch_failed ,"launch failed"); - TRITON_CREATE_CUDA_EXCEPTION(not_permitted ,"not permitted"); - TRITON_CREATE_CUDA_EXCEPTION(not_supported 
,"not supported"); - TRITON_CREATE_CUDA_EXCEPTION(unknown ,"unknown"); - -#undef TRITON_CREATE_CUDA_EXCEPTION - } - - namespace cublas - { - class base: public std::exception{}; - -#define TRITON_CREATE_CUBLAS_EXCEPTION(name, msg) class name: public base { public: const char * what() const throw(){ return "CUBLAS: Error- " msg; } } - - TRITON_CREATE_CUBLAS_EXCEPTION(not_initialized ,"not initialized"); - TRITON_CREATE_CUBLAS_EXCEPTION(alloc_failed ,"alloc failed"); - TRITON_CREATE_CUBLAS_EXCEPTION(invalid_value ,"invalid value"); - TRITON_CREATE_CUBLAS_EXCEPTION(arch_mismatch ,"arch mismatch"); - TRITON_CREATE_CUBLAS_EXCEPTION(mapping_error ,"mapping error"); - TRITON_CREATE_CUBLAS_EXCEPTION(execution_failed ,"execution failed"); - TRITON_CREATE_CUBLAS_EXCEPTION(internal_error ,"internal error"); - TRITON_CREATE_CUBLAS_EXCEPTION(not_supported ,"not supported"); - TRITON_CREATE_CUBLAS_EXCEPTION(license_error ,"license error"); - TRITON_CREATE_CUBLAS_EXCEPTION(unknown ,"unknown"); - -#undef TRITON_CREATE_CUBLAS_EXCEPTION - } - - namespace cudnn - { -#define TRITON_CREATE_CUDNN_EXCEPTION(name, msg) class name: public std::exception { public: const char * what() const throw(){ return "CUDNN: Error- " msg; } } - - TRITON_CREATE_CUDNN_EXCEPTION(not_initialized ,"not initialized"); - TRITON_CREATE_CUDNN_EXCEPTION(alloc_failed ,"allocation failed"); - TRITON_CREATE_CUDNN_EXCEPTION(bad_param ,"bad param"); - TRITON_CREATE_CUDNN_EXCEPTION(internal_error ,"internal error"); - TRITON_CREATE_CUDNN_EXCEPTION(invalid_value ,"invalid value"); - TRITON_CREATE_CUDNN_EXCEPTION(arch_mismatch ,"arch mismatch"); - TRITON_CREATE_CUDNN_EXCEPTION(mapping_error ,"mapping error"); - TRITON_CREATE_CUDNN_EXCEPTION(execution_failed ,"execution failed"); - TRITON_CREATE_CUDNN_EXCEPTION(not_supported ,"not supported"); - TRITON_CREATE_CUDNN_EXCEPTION(license_error ,"license error"); - TRITON_CREATE_CUDNN_EXCEPTION(runtime_prerequisite_missing ,"prerequisite missing"); - TRITON_CREATE_CUDNN_EXCEPTION(runtime_in_progress ,"runtime in progress"); - TRITON_CREATE_CUDNN_EXCEPTION(runtime_fp_overflow ,"runtime fp overflow"); - } - - - - - namespace hip - { - class base: public std::exception{}; - -#define TRITON_CREATE_HIP_EXCEPTION(name, msg) class name: public base { public:const char * what() const throw(){ return "HIP: Error- " msg; } } - - - TRITON_CREATE_HIP_EXCEPTION(invalid_value ,"invalid value"); - TRITON_CREATE_HIP_EXCEPTION(out_of_memory ,"out of memory"); - TRITON_CREATE_HIP_EXCEPTION(not_initialized ,"not initialized"); - TRITON_CREATE_HIP_EXCEPTION(deinitialized ,"deinitialized"); - TRITON_CREATE_HIP_EXCEPTION(profiler_disabled ,"profiler disabled"); - TRITON_CREATE_HIP_EXCEPTION(profiler_not_initialized ,"profiler not initialized"); - TRITON_CREATE_HIP_EXCEPTION(profiler_already_started ,"profiler already started"); - TRITON_CREATE_HIP_EXCEPTION(profiler_already_stopped ,"profiler already stopped"); - TRITON_CREATE_HIP_EXCEPTION(no_device ,"no device"); - TRITON_CREATE_HIP_EXCEPTION(invalid_device ,"invalid device"); - TRITON_CREATE_HIP_EXCEPTION(invalid_image ,"invalid image"); - TRITON_CREATE_HIP_EXCEPTION(invalid_context ,"invalid context"); - TRITON_CREATE_HIP_EXCEPTION(context_already_current ,"context already current"); - TRITON_CREATE_HIP_EXCEPTION(map_failed ,"map failed"); - TRITON_CREATE_HIP_EXCEPTION(unmap_failed ,"unmap failed"); - TRITON_CREATE_HIP_EXCEPTION(array_is_mapped ,"array is mapped"); - TRITON_CREATE_HIP_EXCEPTION(already_mapped ,"already mapped"); - 
TRITON_CREATE_HIP_EXCEPTION(no_binary_for_gpu ,"no binary for gpu"); - TRITON_CREATE_HIP_EXCEPTION(already_acquired ,"already acquired"); - TRITON_CREATE_HIP_EXCEPTION(not_mapped ,"not mapped"); - TRITON_CREATE_HIP_EXCEPTION(not_mapped_as_array ,"not mapped as array"); - TRITON_CREATE_HIP_EXCEPTION(not_mapped_as_pointer ,"not mapped as pointer"); - TRITON_CREATE_HIP_EXCEPTION(ecc_uncorrectable ,"ecc uncorrectable"); - TRITON_CREATE_HIP_EXCEPTION(unsupported_limit ,"unsupported limit"); - TRITON_CREATE_HIP_EXCEPTION(context_already_in_use ,"context already in use"); - TRITON_CREATE_HIP_EXCEPTION(peer_access_unsupported ,"peer access unsupported"); - TRITON_CREATE_HIP_EXCEPTION(invalid_ptx ,"invalid ptx"); - TRITON_CREATE_HIP_EXCEPTION(invalid_graphics_context ,"invalid graphics context"); - TRITON_CREATE_HIP_EXCEPTION(invalid_source ,"invalid source"); - TRITON_CREATE_HIP_EXCEPTION(file_not_found ,"file not found"); - TRITON_CREATE_HIP_EXCEPTION(shared_object_symbol_not_found ,"shared object symbol not found"); - TRITON_CREATE_HIP_EXCEPTION(shared_object_init_failed ,"shared object init failed"); - TRITON_CREATE_HIP_EXCEPTION(operating_system ,"operating system"); - TRITON_CREATE_HIP_EXCEPTION(invalid_handle ,"invalid handle"); - TRITON_CREATE_HIP_EXCEPTION(not_found ,"not found"); - TRITON_CREATE_HIP_EXCEPTION(not_ready ,"not ready"); - TRITON_CREATE_HIP_EXCEPTION(illegal_address ,"illegal address"); - TRITON_CREATE_HIP_EXCEPTION(launch_out_of_resources ,"launch out of resources"); - TRITON_CREATE_HIP_EXCEPTION(launch_timeout ,"launch timeout"); - TRITON_CREATE_HIP_EXCEPTION(launch_incompatible_texturing ,"launch incompatible texturing"); - TRITON_CREATE_HIP_EXCEPTION(peer_access_already_enabled ,"peer access already enabled"); - TRITON_CREATE_HIP_EXCEPTION(peer_access_not_enabled ,"peer access not enabled"); - TRITON_CREATE_HIP_EXCEPTION(primary_context_active ,"primary context active"); - TRITON_CREATE_HIP_EXCEPTION(context_is_destroyed ,"context is destroyed"); - TRITON_CREATE_HIP_EXCEPTION(assert_error ,"assert"); - TRITON_CREATE_HIP_EXCEPTION(too_many_peers ,"too many peers"); - TRITON_CREATE_HIP_EXCEPTION(host_memory_already_registered ,"host memory already registered"); - TRITON_CREATE_HIP_EXCEPTION(host_memory_not_registered ,"hot memory not registered"); - TRITON_CREATE_HIP_EXCEPTION(hardware_stack_error ,"hardware stack error"); - TRITON_CREATE_HIP_EXCEPTION(illegal_instruction ,"illegal instruction"); - TRITON_CREATE_HIP_EXCEPTION(misaligned_address ,"misaligned address"); - TRITON_CREATE_HIP_EXCEPTION(invalid_address_space ,"invalid address space"); - TRITON_CREATE_HIP_EXCEPTION(invalid_pc ,"invalid pc"); - TRITON_CREATE_HIP_EXCEPTION(launch_failed ,"launch failed"); - TRITON_CREATE_HIP_EXCEPTION(not_permitted ,"not permitted"); - TRITON_CREATE_HIP_EXCEPTION(not_supported ,"not supported"); - TRITON_CREATE_HIP_EXCEPTION(invalid_symbol ,"invalid symbol"); - TRITON_CREATE_HIP_EXCEPTION(unknown ,"unknown"); - -#undef TRITON_CREATE_CUDA_EXCEPTION - } - - } - } -} - -#endif diff --git a/include/triton/driver/llvm.h b/include/triton/driver/llvm.h deleted file mode 100644 index c0c1c0f377eb..000000000000 --- a/include/triton/driver/llvm.h +++ /dev/null @@ -1,20 +0,0 @@ -#include -#include "triton/driver/dispatch.h" - -namespace llvm{ -class Module; -} - -namespace triton{ -namespace driver{ - -void init_llvm(); -std::string path_to_ptxas(int& version); -std::string llir_to_ptx(llvm::Module* module, int cc, int version); -std::string ptx_to_cubin(const std::string& ptx, const 
std::string& ptxas_path, int cc); -CUmodule ptx_to_cumodule(const std::string& ptx, int cc); -std::string llir_to_amdgpu(llvm::Module* module, const std::string& proc); -hipModule_t amdgpu_to_hipmodule(const std::string& path); - -} -} diff --git a/include/triton/external/CUDA/cuda.h b/include/triton/external/CUDA/cuda.h deleted file mode 100644 index c6c273287e0d..000000000000 --- a/include/triton/external/CUDA/cuda.h +++ /dev/null @@ -1,18948 +0,0 @@ -/* - * Copyright 1993-2018 NVIDIA Corporation. All rights reserved. - * - * NOTICE TO LICENSEE: - * - * This source code and/or documentation ("Licensed Deliverables") are - * subject to NVIDIA intellectual property rights under U.S. and - * international Copyright laws. - * - * These Licensed Deliverables contained herein is PROPRIETARY and - * CONFIDENTIAL to NVIDIA and is being provided under the terms and - * conditions of a form of NVIDIA software license agreement by and - * between NVIDIA and Licensee ("License Agreement") or electronically - * accepted by Licensee. Notwithstanding any terms or conditions to - * the contrary in the License Agreement, reproduction or disclosure - * of the Licensed Deliverables to any third party without the express - * written consent of NVIDIA is prohibited. - * - * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE - * LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE - * SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS - * PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND. - * NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED - * DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY, - * NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. - * NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE - * LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY - * SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY - * DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, - * WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS - * ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE - * OF THESE LICENSED DELIVERABLES. - * - * U.S. Government End Users. These Licensed Deliverables are a - * "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT - * 1995), consisting of "commercial computer software" and "commercial - * computer software documentation" as such terms are used in 48 - * C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government - * only as a commercial end item. Consistent with 48 C.F.R.12.212 and - * 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all - * U.S. Government End Users acquire the Licensed Deliverables with - * only those rights set forth herein. - * - * Any use of the Licensed Deliverables in individual and commercial - * software must include, in the user documentation and internal - * comments to the code, the above Disclaimer and U.S. Government End - * Users Notice. 
- */ - -#ifndef __cuda_cuda_h__ -#define __cuda_cuda_h__ - -#include -#ifdef _MSC_VER -typedef unsigned __int32 cuuint32_t; -typedef unsigned __int64 cuuint64_t; -#else -#include -typedef uint32_t cuuint32_t; -typedef uint64_t cuuint64_t; -#endif - -#if defined(__CUDA_API_VERSION_INTERNAL) || defined(__DOXYGEN_ONLY__) || defined(CUDA_ENABLE_DEPRECATED) -#define __CUDA_DEPRECATED -#elif defined(_MSC_VER) -#define __CUDA_DEPRECATED __declspec(deprecated) -#elif defined(__GNUC__) -#define __CUDA_DEPRECATED __attribute__((deprecated)) -#else -#define __CUDA_DEPRECATED -#endif - -#if defined(CUDA_FORCE_API_VERSION) -#error "CUDA_FORCE_API_VERSION is no longer supported." -#endif - -#if defined(__CUDA_API_VERSION_INTERNAL) || defined(CUDA_API_PER_THREAD_DEFAULT_STREAM) - #define __CUDA_API_PER_THREAD_DEFAULT_STREAM - #define __CUDA_API_PTDS(api) api ## _ptds - #define __CUDA_API_PTSZ(api) api ## _ptsz -#else - #define __CUDA_API_PTDS(api) api - #define __CUDA_API_PTSZ(api) api -#endif - -#define cuDeviceTotalMem cuDeviceTotalMem_v2 -#define cuCtxCreate cuCtxCreate_v2 -#define cuCtxCreate_v3 cuCtxCreate_v3 -#define cuModuleGetGlobal cuModuleGetGlobal_v2 -#define cuMemGetInfo cuMemGetInfo_v2 -#define cuMemAlloc cuMemAlloc_v2 -#define cuMemAllocPitch cuMemAllocPitch_v2 -#define cuMemFree cuMemFree_v2 -#define cuMemGetAddressRange cuMemGetAddressRange_v2 -#define cuMemAllocHost cuMemAllocHost_v2 -#define cuMemHostGetDevicePointer cuMemHostGetDevicePointer_v2 -#define cuMemcpyHtoD __CUDA_API_PTDS(cuMemcpyHtoD_v2) -#define cuMemcpyDtoH __CUDA_API_PTDS(cuMemcpyDtoH_v2) -#define cuMemcpyDtoD __CUDA_API_PTDS(cuMemcpyDtoD_v2) -#define cuMemcpyDtoA __CUDA_API_PTDS(cuMemcpyDtoA_v2) -#define cuMemcpyAtoD __CUDA_API_PTDS(cuMemcpyAtoD_v2) -#define cuMemcpyHtoA __CUDA_API_PTDS(cuMemcpyHtoA_v2) -#define cuMemcpyAtoH __CUDA_API_PTDS(cuMemcpyAtoH_v2) -#define cuMemcpyAtoA __CUDA_API_PTDS(cuMemcpyAtoA_v2) -#define cuMemcpyHtoAAsync __CUDA_API_PTSZ(cuMemcpyHtoAAsync_v2) -#define cuMemcpyAtoHAsync __CUDA_API_PTSZ(cuMemcpyAtoHAsync_v2) -#define cuMemcpy2D __CUDA_API_PTDS(cuMemcpy2D_v2) -#define cuMemcpy2DUnaligned __CUDA_API_PTDS(cuMemcpy2DUnaligned_v2) -#define cuMemcpy3D __CUDA_API_PTDS(cuMemcpy3D_v2) -#define cuMemcpyHtoDAsync __CUDA_API_PTSZ(cuMemcpyHtoDAsync_v2) -#define cuMemcpyDtoHAsync __CUDA_API_PTSZ(cuMemcpyDtoHAsync_v2) -#define cuMemcpyDtoDAsync __CUDA_API_PTSZ(cuMemcpyDtoDAsync_v2) -#define cuMemcpy2DAsync __CUDA_API_PTSZ(cuMemcpy2DAsync_v2) -#define cuMemcpy3DAsync __CUDA_API_PTSZ(cuMemcpy3DAsync_v2) -#define cuMemsetD8 __CUDA_API_PTDS(cuMemsetD8_v2) -#define cuMemsetD16 __CUDA_API_PTDS(cuMemsetD16_v2) -#define cuMemsetD32 __CUDA_API_PTDS(cuMemsetD32_v2) -#define cuMemsetD2D8 __CUDA_API_PTDS(cuMemsetD2D8_v2) -#define cuMemsetD2D16 __CUDA_API_PTDS(cuMemsetD2D16_v2) -#define cuMemsetD2D32 __CUDA_API_PTDS(cuMemsetD2D32_v2) -#define cuArrayCreate cuArrayCreate_v2 -#define cuArrayGetDescriptor cuArrayGetDescriptor_v2 -#define cuArray3DCreate cuArray3DCreate_v2 -#define cuArray3DGetDescriptor cuArray3DGetDescriptor_v2 -#define cuTexRefSetAddress cuTexRefSetAddress_v2 -#define cuTexRefGetAddress cuTexRefGetAddress_v2 -#define cuGraphicsResourceGetMappedPointer cuGraphicsResourceGetMappedPointer_v2 -#define cuCtxDestroy cuCtxDestroy_v2 -#define cuCtxPopCurrent cuCtxPopCurrent_v2 -#define cuCtxPushCurrent cuCtxPushCurrent_v2 -#define cuStreamDestroy cuStreamDestroy_v2 -#define cuEventDestroy cuEventDestroy_v2 -#define cuTexRefSetAddress2D cuTexRefSetAddress2D_v3 -#define cuLinkCreate cuLinkCreate_v2 -#define 
cuLinkAddData cuLinkAddData_v2 -#define cuLinkAddFile cuLinkAddFile_v2 -#define cuMemHostRegister cuMemHostRegister_v2 -#define cuGraphicsResourceSetMapFlags cuGraphicsResourceSetMapFlags_v2 -#define cuStreamBeginCapture __CUDA_API_PTSZ(cuStreamBeginCapture_v2) -#define cuDevicePrimaryCtxRelease cuDevicePrimaryCtxRelease_v2 -#define cuDevicePrimaryCtxReset cuDevicePrimaryCtxReset_v2 -#define cuDevicePrimaryCtxSetFlags cuDevicePrimaryCtxSetFlags_v2 -#define cuDeviceGetUuid_v2 cuDeviceGetUuid_v2 -#define cuIpcOpenMemHandle cuIpcOpenMemHandle_v2 -#define cuGraphInstantiate cuGraphInstantiate_v2 - -#if defined(__CUDA_API_PER_THREAD_DEFAULT_STREAM) - #define cuMemcpy __CUDA_API_PTDS(cuMemcpy) - #define cuMemcpyAsync __CUDA_API_PTSZ(cuMemcpyAsync) - #define cuMemcpyPeer __CUDA_API_PTDS(cuMemcpyPeer) - #define cuMemcpyPeerAsync __CUDA_API_PTSZ(cuMemcpyPeerAsync) - #define cuMemcpy3DPeer __CUDA_API_PTDS(cuMemcpy3DPeer) - #define cuMemcpy3DPeerAsync __CUDA_API_PTSZ(cuMemcpy3DPeerAsync) - #define cuMemPrefetchAsync __CUDA_API_PTSZ(cuMemPrefetchAsync) - - #define cuMemsetD8Async __CUDA_API_PTSZ(cuMemsetD8Async) - #define cuMemsetD16Async __CUDA_API_PTSZ(cuMemsetD16Async) - #define cuMemsetD32Async __CUDA_API_PTSZ(cuMemsetD32Async) - #define cuMemsetD2D8Async __CUDA_API_PTSZ(cuMemsetD2D8Async) - #define cuMemsetD2D16Async __CUDA_API_PTSZ(cuMemsetD2D16Async) - #define cuMemsetD2D32Async __CUDA_API_PTSZ(cuMemsetD2D32Async) - - #define cuStreamGetPriority __CUDA_API_PTSZ(cuStreamGetPriority) - #define cuStreamGetFlags __CUDA_API_PTSZ(cuStreamGetFlags) - #define cuStreamGetCtx __CUDA_API_PTSZ(cuStreamGetCtx) - #define cuStreamWaitEvent __CUDA_API_PTSZ(cuStreamWaitEvent) - #define cuStreamEndCapture __CUDA_API_PTSZ(cuStreamEndCapture) - #define cuStreamIsCapturing __CUDA_API_PTSZ(cuStreamIsCapturing) - #define cuStreamGetCaptureInfo __CUDA_API_PTSZ(cuStreamGetCaptureInfo) - #define cuStreamGetCaptureInfo_v2 __CUDA_API_PTSZ(cuStreamGetCaptureInfo_v2) - #define cuStreamUpdateCaptureDependencies __CUDA_API_PTSZ(cuStreamUpdateCaptureDependencies) - #define cuStreamAddCallback __CUDA_API_PTSZ(cuStreamAddCallback) - #define cuStreamAttachMemAsync __CUDA_API_PTSZ(cuStreamAttachMemAsync) - #define cuStreamQuery __CUDA_API_PTSZ(cuStreamQuery) - #define cuStreamSynchronize __CUDA_API_PTSZ(cuStreamSynchronize) - #define cuEventRecord __CUDA_API_PTSZ(cuEventRecord) - #define cuEventRecordWithFlags __CUDA_API_PTSZ(cuEventRecordWithFlags) - #define cuLaunchKernel __CUDA_API_PTSZ(cuLaunchKernel) - #define cuLaunchHostFunc __CUDA_API_PTSZ(cuLaunchHostFunc) - #define cuGraphicsMapResources __CUDA_API_PTSZ(cuGraphicsMapResources) - #define cuGraphicsUnmapResources __CUDA_API_PTSZ(cuGraphicsUnmapResources) - - #define cuStreamWriteValue32 __CUDA_API_PTSZ(cuStreamWriteValue32) - #define cuStreamWaitValue32 __CUDA_API_PTSZ(cuStreamWaitValue32) - #define cuStreamWriteValue64 __CUDA_API_PTSZ(cuStreamWriteValue64) - #define cuStreamWaitValue64 __CUDA_API_PTSZ(cuStreamWaitValue64) - #define cuStreamBatchMemOp __CUDA_API_PTSZ(cuStreamBatchMemOp) - - #define cuLaunchCooperativeKernel __CUDA_API_PTSZ(cuLaunchCooperativeKernel) - - #define cuSignalExternalSemaphoresAsync __CUDA_API_PTSZ(cuSignalExternalSemaphoresAsync) - #define cuWaitExternalSemaphoresAsync __CUDA_API_PTSZ(cuWaitExternalSemaphoresAsync) - - #define cuGraphUpload __CUDA_API_PTSZ(cuGraphUpload) - #define cuGraphLaunch __CUDA_API_PTSZ(cuGraphLaunch) - #define cuStreamCopyAttributes __CUDA_API_PTSZ(cuStreamCopyAttributes) - #define cuStreamGetAttribute 
__CUDA_API_PTSZ(cuStreamGetAttribute) - #define cuStreamSetAttribute __CUDA_API_PTSZ(cuStreamSetAttribute) - #define cuMemMapArrayAsync __CUDA_API_PTSZ(cuMemMapArrayAsync) - - #define cuMemFreeAsync __CUDA_API_PTSZ(cuMemFreeAsync) - #define cuMemAllocAsync __CUDA_API_PTSZ(cuMemAllocAsync) - #define cuMemAllocFromPoolAsync __CUDA_API_PTSZ(cuMemAllocFromPoolAsync) -#endif - -/** - * \file cuda.h - * \brief Header file for the CUDA Toolkit application programming interface. - * - * \file cudaGL.h - * \brief Header file for the OpenGL interoperability functions of the - * low-level CUDA driver application programming interface. - * - * \file cudaD3D9.h - * \brief Header file for the Direct3D 9 interoperability functions of the - * low-level CUDA driver application programming interface. - */ - -/** - * \defgroup CUDA_TYPES Data types used by CUDA driver - * @{ - */ - -/** - * CUDA API version number - */ -#define CUDA_VERSION 11040 - -#ifdef __cplusplus -extern "C" { -#endif - -/** - * CUDA device pointer - * CUdeviceptr is defined as an unsigned integer type whose size matches the size of a pointer on the target platform. - */ -#if defined(_WIN64) || defined(__LP64__) -typedef unsigned long long CUdeviceptr_v2; -#else -typedef unsigned int CUdeviceptr_v2; -#endif -typedef CUdeviceptr_v2 CUdeviceptr; /**< CUDA device pointer */ - -typedef int CUdevice_v1; /**< CUDA device */ -typedef CUdevice_v1 CUdevice; /**< CUDA device */ -typedef struct CUctx_st *CUcontext; /**< CUDA context */ -typedef struct CUmod_st *CUmodule; /**< CUDA module */ -typedef struct CUfunc_st *CUfunction; /**< CUDA function */ -typedef struct CUarray_st *CUarray; /**< CUDA array */ -typedef struct CUmipmappedArray_st *CUmipmappedArray; /**< CUDA mipmapped array */ -typedef struct CUtexref_st *CUtexref; /**< CUDA texture reference */ -typedef struct CUsurfref_st *CUsurfref; /**< CUDA surface reference */ -typedef struct CUevent_st *CUevent; /**< CUDA event */ -typedef struct CUstream_st *CUstream; /**< CUDA stream */ -typedef struct CUgraphicsResource_st *CUgraphicsResource; /**< CUDA graphics interop resource */ -typedef unsigned long long CUtexObject_v1; /**< An opaque value that represents a CUDA texture object */ -typedef CUtexObject_v1 CUtexObject; /**< An opaque value that represents a CUDA texture object */ -typedef unsigned long long CUsurfObject_v1; /**< An opaque value that represents a CUDA surface object */ -typedef CUsurfObject_v1 CUsurfObject; /**< An opaque value that represents a CUDA surface object */ -typedef struct CUextMemory_st *CUexternalMemory; /**< CUDA external memory */ -typedef struct CUextSemaphore_st *CUexternalSemaphore; /**< CUDA external semaphore */ -typedef struct CUgraph_st *CUgraph; /**< CUDA graph */ -typedef struct CUgraphNode_st *CUgraphNode; /**< CUDA graph node */ -typedef struct CUgraphExec_st *CUgraphExec; /**< CUDA executable graph */ -typedef struct CUmemPoolHandle_st *CUmemoryPool; /**< CUDA memory pool */ -typedef struct CUuserObject_st *CUuserObject; /**< CUDA user object for graphs */ - -#ifndef CU_UUID_HAS_BEEN_DEFINED -#define CU_UUID_HAS_BEEN_DEFINED -typedef struct CUuuid_st { /**< CUDA definition of UUID */ - char bytes[16]; -} CUuuid; -#endif - -/** - * CUDA IPC handle size - */ -#define CU_IPC_HANDLE_SIZE 64 - -/** - * CUDA IPC event handle - */ -typedef struct CUipcEventHandle_st { - char reserved[CU_IPC_HANDLE_SIZE]; -} CUipcEventHandle_v1; -typedef CUipcEventHandle_v1 CUipcEventHandle; - -/** - * CUDA IPC mem handle - */ -typedef struct CUipcMemHandle_st { - char 
reserved[CU_IPC_HANDLE_SIZE]; -} CUipcMemHandle_v1; -typedef CUipcMemHandle_v1 CUipcMemHandle; - -/** - * CUDA Ipc Mem Flags - */ -typedef enum CUipcMem_flags_enum { - CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS = 0x1 /**< Automatically enable peer access between remote devices as needed */ -} CUipcMem_flags; - - -/** - * CUDA Mem Attach Flags - */ -typedef enum CUmemAttach_flags_enum { - CU_MEM_ATTACH_GLOBAL = 0x1, /**< Memory can be accessed by any stream on any device */ - CU_MEM_ATTACH_HOST = 0x2, /**< Memory cannot be accessed by any stream on any device */ - CU_MEM_ATTACH_SINGLE = 0x4 /**< Memory can only be accessed by a single stream on the associated device */ -} CUmemAttach_flags; - -/** - * Context creation flags - */ -typedef enum CUctx_flags_enum { - CU_CTX_SCHED_AUTO = 0x00, /**< Automatic scheduling */ - CU_CTX_SCHED_SPIN = 0x01, /**< Set spin as default scheduling */ - CU_CTX_SCHED_YIELD = 0x02, /**< Set yield as default scheduling */ - CU_CTX_SCHED_BLOCKING_SYNC = 0x04, /**< Set blocking synchronization as default scheduling */ - CU_CTX_BLOCKING_SYNC = 0x04, /**< Set blocking synchronization as default scheduling - * \deprecated This flag was deprecated as of CUDA 4.0 - * and was replaced with ::CU_CTX_SCHED_BLOCKING_SYNC. */ - CU_CTX_SCHED_MASK = 0x07, - CU_CTX_MAP_HOST = 0x08, /**< \deprecated This flag was deprecated as of CUDA 11.0 - * and it no longer has any effect. All contexts - * as of CUDA 3.2 behave as though the flag is enabled. */ - CU_CTX_LMEM_RESIZE_TO_MAX = 0x10, /**< Keep local memory allocation after launch */ - CU_CTX_FLAGS_MASK = 0x1f -} CUctx_flags; - -/** - * Stream creation flags - */ -typedef enum CUstream_flags_enum { - CU_STREAM_DEFAULT = 0x0, /**< Default stream flag */ - CU_STREAM_NON_BLOCKING = 0x1 /**< Stream does not synchronize with stream 0 (the NULL stream) */ -} CUstream_flags; - -/** - * Legacy stream handle - * - * Stream handle that can be passed as a CUstream to use an implicit stream - * with legacy synchronization behavior. - * - * See details of the \link_sync_behavior - */ -#define CU_STREAM_LEGACY ((CUstream)0x1) - -/** - * Per-thread stream handle - * - * Stream handle that can be passed as a CUstream to use an implicit stream - * with per-thread synchronization behavior. - * - * See details of the \link_sync_behavior - */ -#define CU_STREAM_PER_THREAD ((CUstream)0x2) - -/** - * Event creation flags - */ -typedef enum CUevent_flags_enum { - CU_EVENT_DEFAULT = 0x0, /**< Default event flag */ - CU_EVENT_BLOCKING_SYNC = 0x1, /**< Event uses blocking synchronization */ - CU_EVENT_DISABLE_TIMING = 0x2, /**< Event will not record timing data */ - CU_EVENT_INTERPROCESS = 0x4 /**< Event is suitable for interprocess use. CU_EVENT_DISABLE_TIMING must be set */ -} CUevent_flags; - -/** - * Event record flags - */ -typedef enum CUevent_record_flags_enum { - CU_EVENT_RECORD_DEFAULT = 0x0, /**< Default event record flag */ - CU_EVENT_RECORD_EXTERNAL = 0x1 /**< When using stream capture, create an event record node - * instead of the default behavior. This flag is invalid - * when used outside of capture. */ -} CUevent_record_flags; - -/** - * Event wait flags - */ -typedef enum CUevent_wait_flags_enum { - CU_EVENT_WAIT_DEFAULT = 0x0, /**< Default event wait flag */ - CU_EVENT_WAIT_EXTERNAL = 0x1 /**< When using stream capture, create an event wait node - * instead of the default behavior. 
This flag is invalid - * when used outside of capture.*/ -} CUevent_wait_flags; - -/** - * Flags for ::cuStreamWaitValue32 and ::cuStreamWaitValue64 - */ -typedef enum CUstreamWaitValue_flags_enum { - CU_STREAM_WAIT_VALUE_GEQ = 0x0, /**< Wait until (int32_t)(*addr - value) >= 0 (or int64_t for 64 bit - values). Note this is a cyclic comparison which ignores wraparound. - (Default behavior.) */ - CU_STREAM_WAIT_VALUE_EQ = 0x1, /**< Wait until *addr == value. */ - CU_STREAM_WAIT_VALUE_AND = 0x2, /**< Wait until (*addr & value) != 0. */ - CU_STREAM_WAIT_VALUE_NOR = 0x3, /**< Wait until ~(*addr | value) != 0. Support for this operation can be - queried with ::cuDeviceGetAttribute() and - ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR.*/ - CU_STREAM_WAIT_VALUE_FLUSH = 1<<30 /**< Follow the wait operation with a flush of outstanding remote writes. This - means that, if a remote write operation is guaranteed to have reached the - device before the wait can be satisfied, that write is guaranteed to be - visible to downstream device work. The device is permitted to reorder - remote writes internally. For example, this flag would be required if - two remote writes arrive in a defined order, the wait is satisfied by the - second write, and downstream work needs to observe the first write. - Support for this operation is restricted to selected platforms and can be - queried with ::CU_DEVICE_ATTRIBUTE_CAN_USE_WAIT_VALUE_FLUSH.*/ -} CUstreamWaitValue_flags; - -/** - * Flags for ::cuStreamWriteValue32 - */ -typedef enum CUstreamWriteValue_flags_enum { - CU_STREAM_WRITE_VALUE_DEFAULT = 0x0, /**< Default behavior */ - CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER = 0x1 /**< Permits the write to be reordered with writes which were issued - before it, as a performance optimization. Normally, - ::cuStreamWriteValue32 will provide a memory fence before the - write, which has similar semantics to - __threadfence_system() but is scoped to the stream - rather than a CUDA thread. */ -} CUstreamWriteValue_flags; - -/** - * Operations for ::cuStreamBatchMemOp - */ -typedef enum CUstreamBatchMemOpType_enum { - CU_STREAM_MEM_OP_WAIT_VALUE_32 = 1, /**< Represents a ::cuStreamWaitValue32 operation */ - CU_STREAM_MEM_OP_WRITE_VALUE_32 = 2, /**< Represents a ::cuStreamWriteValue32 operation */ - CU_STREAM_MEM_OP_WAIT_VALUE_64 = 4, /**< Represents a ::cuStreamWaitValue64 operation */ - CU_STREAM_MEM_OP_WRITE_VALUE_64 = 5, /**< Represents a ::cuStreamWriteValue64 operation */ - CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES = 3 /**< This has the same effect as ::CU_STREAM_WAIT_VALUE_FLUSH, but as a - standalone operation. */ -} CUstreamBatchMemOpType; - -/** - * Per-operation parameters for ::cuStreamBatchMemOp - */ -typedef union CUstreamBatchMemOpParams_union { - CUstreamBatchMemOpType operation; - struct CUstreamMemOpWaitValueParams_st { - CUstreamBatchMemOpType operation; - CUdeviceptr address; - union { - cuuint32_t value; - cuuint64_t value64; - }; - unsigned int flags; - CUdeviceptr alias; /**< For driver internal use. Initial value is unimportant. */ - } waitValue; - struct CUstreamMemOpWriteValueParams_st { - CUstreamBatchMemOpType operation; - CUdeviceptr address; - union { - cuuint32_t value; - cuuint64_t value64; - }; - unsigned int flags; - CUdeviceptr alias; /**< For driver internal use. Initial value is unimportant. 
*/ - } writeValue; - struct CUstreamMemOpFlushRemoteWritesParams_st { - CUstreamBatchMemOpType operation; - unsigned int flags; - } flushRemoteWrites; - cuuint64_t pad[6]; -} CUstreamBatchMemOpParams_v1; -typedef CUstreamBatchMemOpParams_v1 CUstreamBatchMemOpParams; - -/** - * Occupancy calculator flag - */ -typedef enum CUoccupancy_flags_enum { - CU_OCCUPANCY_DEFAULT = 0x0, /**< Default behavior */ - CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE = 0x1 /**< Assume global caching is enabled and cannot be automatically turned off */ -} CUoccupancy_flags; - -/** - * Flags for ::cuStreamUpdateCaptureDependencies - */ -typedef enum CUstreamUpdateCaptureDependencies_flags_enum { - CU_STREAM_ADD_CAPTURE_DEPENDENCIES = 0x0, /**< Add new nodes to the dependency set */ - CU_STREAM_SET_CAPTURE_DEPENDENCIES = 0x1 /**< Replace the dependency set with the new nodes */ -} CUstreamUpdateCaptureDependencies_flags; - -/** - * Array formats - */ -typedef enum CUarray_format_enum { - CU_AD_FORMAT_UNSIGNED_INT8 = 0x01, /**< Unsigned 8-bit integers */ - CU_AD_FORMAT_UNSIGNED_INT16 = 0x02, /**< Unsigned 16-bit integers */ - CU_AD_FORMAT_UNSIGNED_INT32 = 0x03, /**< Unsigned 32-bit integers */ - CU_AD_FORMAT_SIGNED_INT8 = 0x08, /**< Signed 8-bit integers */ - CU_AD_FORMAT_SIGNED_INT16 = 0x09, /**< Signed 16-bit integers */ - CU_AD_FORMAT_SIGNED_INT32 = 0x0a, /**< Signed 32-bit integers */ - CU_AD_FORMAT_HALF = 0x10, /**< 16-bit floating point */ - CU_AD_FORMAT_FLOAT = 0x20, /**< 32-bit floating point */ - CU_AD_FORMAT_NV12 = 0xb0 -} CUarray_format; - -/** - * Texture reference addressing modes - */ -typedef enum CUaddress_mode_enum { - CU_TR_ADDRESS_MODE_WRAP = 0, /**< Wrapping address mode */ - CU_TR_ADDRESS_MODE_CLAMP = 1, /**< Clamp to edge address mode */ - CU_TR_ADDRESS_MODE_MIRROR = 2, /**< Mirror address mode */ - CU_TR_ADDRESS_MODE_BORDER = 3 /**< Border address mode */ -} CUaddress_mode; - -/** - * Texture reference filtering modes - */ -typedef enum CUfilter_mode_enum { - CU_TR_FILTER_MODE_POINT = 0, /**< Point filter mode */ - CU_TR_FILTER_MODE_LINEAR = 1 /**< Linear filter mode */ -} CUfilter_mode; - -/** - * Device properties - */ -typedef enum CUdevice_attribute_enum { - CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK = 1, /**< Maximum number of threads per block */ - CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X = 2, /**< Maximum block dimension X */ - CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y = 3, /**< Maximum block dimension Y */ - CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z = 4, /**< Maximum block dimension Z */ - CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X = 5, /**< Maximum grid dimension X */ - CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y = 6, /**< Maximum grid dimension Y */ - CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z = 7, /**< Maximum grid dimension Z */ - CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK = 8, /**< Maximum shared memory available per block in bytes */ - CU_DEVICE_ATTRIBUTE_SHARED_MEMORY_PER_BLOCK = 8, /**< Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK */ - CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY = 9, /**< Memory available on device for __constant__ variables in a CUDA C kernel in bytes */ - CU_DEVICE_ATTRIBUTE_WARP_SIZE = 10, /**< Warp size in threads */ - CU_DEVICE_ATTRIBUTE_MAX_PITCH = 11, /**< Maximum pitch in bytes allowed by memory copies */ - CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK = 12, /**< Maximum number of 32-bit registers available per block */ - CU_DEVICE_ATTRIBUTE_REGISTERS_PER_BLOCK = 12, /**< Deprecated, use CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK */ - CU_DEVICE_ATTRIBUTE_CLOCK_RATE = 13, /**< 
Typical clock frequency in kilohertz */ - CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT = 14, /**< Alignment requirement for textures */ - CU_DEVICE_ATTRIBUTE_GPU_OVERLAP = 15, /**< Device can possibly copy memory and execute a kernel concurrently. Deprecated. Use instead CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT. */ - CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT = 16, /**< Number of multiprocessors on device */ - CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT = 17, /**< Specifies whether there is a run time limit on kernels */ - CU_DEVICE_ATTRIBUTE_INTEGRATED = 18, /**< Device is integrated with host memory */ - CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY = 19, /**< Device can map host memory into CUDA address space */ - CU_DEVICE_ATTRIBUTE_COMPUTE_MODE = 20, /**< Compute mode (See ::CUcomputemode for details) */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH = 21, /**< Maximum 1D texture width */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH = 22, /**< Maximum 2D texture width */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT = 23, /**< Maximum 2D texture height */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH = 24, /**< Maximum 3D texture width */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT = 25, /**< Maximum 3D texture height */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH = 26, /**< Maximum 3D texture depth */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH = 27, /**< Maximum 2D layered texture width */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT = 28, /**< Maximum 2D layered texture height */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS = 29, /**< Maximum layers in a 2D layered texture */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_WIDTH = 27, /**< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_HEIGHT = 28, /**< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_ARRAY_NUMSLICES = 29, /**< Deprecated, use CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS */ - CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT = 30, /**< Alignment requirement for surfaces */ - CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS = 31, /**< Device can possibly execute multiple kernels concurrently */ - CU_DEVICE_ATTRIBUTE_ECC_ENABLED = 32, /**< Device has ECC support enabled */ - CU_DEVICE_ATTRIBUTE_PCI_BUS_ID = 33, /**< PCI bus ID of the device */ - CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID = 34, /**< PCI device ID of the device */ - CU_DEVICE_ATTRIBUTE_TCC_DRIVER = 35, /**< Device is using TCC driver model */ - CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE = 36, /**< Peak memory clock frequency in kilohertz */ - CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH = 37, /**< Global memory bus width in bits */ - CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE = 38, /**< Size of L2 cache in bytes */ - CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR = 39, /**< Maximum resident threads per multiprocessor */ - CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT = 40, /**< Number of asynchronous engines */ - CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING = 41, /**< Device shares a unified address space with the host */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH = 42, /**< Maximum 1D layered texture width */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS = 43, /**< Maximum layers in a 1D layered texture */ - CU_DEVICE_ATTRIBUTE_CAN_TEX2D_GATHER = 44, /**< Deprecated, do not use. 
*/ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH = 45, /**< Maximum 2D texture width if CUDA_ARRAY3D_TEXTURE_GATHER is set */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT = 46, /**< Maximum 2D texture height if CUDA_ARRAY3D_TEXTURE_GATHER is set */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE = 47, /**< Alternate maximum 3D texture width */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE = 48, /**< Alternate maximum 3D texture height */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE = 49, /**< Alternate maximum 3D texture depth */ - CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID = 50, /**< PCI domain ID of the device */ - CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT = 51, /**< Pitch alignment requirement for textures */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH = 52, /**< Maximum cubemap texture width/height */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH = 53, /**< Maximum cubemap layered texture width/height */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS = 54, /**< Maximum layers in a cubemap layered texture */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH = 55, /**< Maximum 1D surface width */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH = 56, /**< Maximum 2D surface width */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT = 57, /**< Maximum 2D surface height */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH = 58, /**< Maximum 3D surface width */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT = 59, /**< Maximum 3D surface height */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH = 60, /**< Maximum 3D surface depth */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH = 61, /**< Maximum 1D layered surface width */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS = 62, /**< Maximum layers in a 1D layered surface */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH = 63, /**< Maximum 2D layered surface width */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT = 64, /**< Maximum 2D layered surface height */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS = 65, /**< Maximum layers in a 2D layered surface */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH = 66, /**< Maximum cubemap surface width */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH = 67, /**< Maximum cubemap layered surface width */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS = 68, /**< Maximum layers in a cubemap layered surface */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH = 69, /**< Deprecated, do not use. Use cudaDeviceGetTexture1DLinearMaxWidth() or cuDeviceGetTexture1DLinearMaxWidth() instead. 
*/ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH = 70, /**< Maximum 2D linear texture width */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT = 71, /**< Maximum 2D linear texture height */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH = 72, /**< Maximum 2D linear texture pitch in bytes */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH = 73, /**< Maximum mipmapped 2D texture width */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT = 74, /**< Maximum mipmapped 2D texture height */ - CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR = 75, /**< Major compute capability version number */ - CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR = 76, /**< Minor compute capability version number */ - CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH = 77, /**< Maximum mipmapped 1D texture width */ - CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED = 78, /**< Device supports stream priorities */ - CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED = 79, /**< Device supports caching globals in L1 */ - CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED = 80, /**< Device supports caching locals in L1 */ - CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR = 81, /**< Maximum shared memory available per multiprocessor in bytes */ - CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR = 82, /**< Maximum number of 32-bit registers available per multiprocessor */ - CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY = 83, /**< Device can allocate managed memory on this system */ - CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD = 84, /**< Device is on a multi-GPU board */ - CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID = 85, /**< Unique id for a group of devices on the same multi-GPU board */ - CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED = 86, /**< Link between the device and the host supports native atomic operations (this is a placeholder attribute, and is not supported on any current hardware)*/ - CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO = 87, /**< Ratio of single precision performance (in floating-point operations per second) to double precision performance */ - CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS = 88, /**< Device supports coherently accessing pageable memory without calling cudaHostRegister on it */ - CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS = 89, /**< Device can coherently access managed memory concurrently with the CPU */ - CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED = 90, /**< Device supports compute preemption. */ - CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM = 91, /**< Device can access host registered memory at the same virtual address as the CPU */ - CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS = 92, /**< ::cuStreamBatchMemOp and related APIs are supported. */ - CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS = 93, /**< 64-bit operations are supported in ::cuStreamBatchMemOp and related APIs. */ - CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR = 94, /**< ::CU_STREAM_WAIT_VALUE_NOR is supported. */ - CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH = 95, /**< Device supports launching cooperative kernels via ::cuLaunchCooperativeKernel */ - CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH = 96, /**< Deprecated, ::cuLaunchCooperativeKernelMultiDevice is deprecated. */ - CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN = 97, /**< Maximum optin shared memory per block */ - CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES = 98, /**< The ::CU_STREAM_WAIT_VALUE_FLUSH flag and the ::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES MemOp are supported on the device. 
See \ref CUDA_MEMOP for additional details. */ - CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED = 99, /**< Device supports host memory registration via ::cudaHostRegister. */ - CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES = 100, /**< Device accesses pageable memory via the host's page tables. */ - CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST = 101, /**< The host can directly access managed memory on the device without migration. */ - CU_DEVICE_ATTRIBUTE_VIRTUAL_ADDRESS_MANAGEMENT_SUPPORTED = 102, /**< Deprecated, Use CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED*/ - CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED = 102, /**< Device supports virtual memory management APIs like ::cuMemAddressReserve, ::cuMemCreate, ::cuMemMap and related APIs */ - CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED = 103, /**< Device supports exporting memory to a posix file descriptor with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate */ - CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED = 104, /**< Device supports exporting memory to a Win32 NT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate */ - CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED = 105, /**< Device supports exporting memory to a Win32 KMT handle with ::cuMemExportToShareableHandle, if requested ::cuMemCreate */ - CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR = 106, /**< Maximum number of blocks per multiprocessor */ - CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED = 107, /**< Device supports compression of memory */ - CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE = 108, /**< Maximum L2 persisting lines capacity setting in bytes. */ - CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE = 109, /**< Maximum value of CUaccessPolicyWindow::num_bytes. */ - CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WITH_CUDA_VMM_SUPPORTED = 110, /**< Device supports specifying the GPUDirect RDMA flag with ::cuMemCreate */ - CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK = 111, /**< Shared memory reserved by CUDA driver per block in bytes */ - CU_DEVICE_ATTRIBUTE_SPARSE_CUDA_ARRAY_SUPPORTED = 112, /**< Device supports sparse CUDA arrays and sparse CUDA mipmapped arrays */ - CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED = 113, /**< Device supports using the ::cuMemHostRegister flag CU_MEMHOSTERGISTER_READ_ONLY to register memory that must be mapped as read-only to the GPU */ - CU_DEVICE_ATTRIBUTE_TIMELINE_SEMAPHORE_INTEROP_SUPPORTED = 114, /**< External timeline semaphore interop is supported on the device */ - CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED = 115, /**< Device supports using the ::cuMemAllocAsync and ::cuMemPool family of APIs */ - CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_SUPPORTED = 116, /**< Device supports GPUDirect RDMA APIs, like nvidia_p2p_get_pages (see https://docs.nvidia.com/cuda/gpudirect-rdma for more information) */ - CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS = 117, /**< The returned attribute shall be interpreted as a bitmask, where the individual bits are described by the ::CUflushGPUDirectRDMAWritesOptions enum */ - CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING = 118, /**< GPUDirect RDMA writes to the device do not need to be flushed for consumers within the scope indicated by the returned attribute. See ::CUGPUDirectRDMAWritesOrdering for the numerical values returned here. 
*/ - CU_DEVICE_ATTRIBUTE_MEMPOOL_SUPPORTED_HANDLE_TYPES = 119, /**< Handle types supported with mempool based IPC */ - CU_DEVICE_ATTRIBUTE_MAX -} CUdevice_attribute; - -/** - * Legacy device properties - */ -typedef struct CUdevprop_st { - int maxThreadsPerBlock; /**< Maximum number of threads per block */ - int maxThreadsDim[3]; /**< Maximum size of each dimension of a block */ - int maxGridSize[3]; /**< Maximum size of each dimension of a grid */ - int sharedMemPerBlock; /**< Shared memory available per block in bytes */ - int totalConstantMemory; /**< Constant memory available on device in bytes */ - int SIMDWidth; /**< Warp size in threads */ - int memPitch; /**< Maximum pitch in bytes allowed by memory copies */ - int regsPerBlock; /**< 32-bit registers available per block */ - int clockRate; /**< Clock frequency in kilohertz */ - int textureAlign; /**< Alignment requirement for textures */ -} CUdevprop_v1; -typedef CUdevprop_v1 CUdevprop; - -/** - * Pointer information - */ -typedef enum CUpointer_attribute_enum { - CU_POINTER_ATTRIBUTE_CONTEXT = 1, /**< The ::CUcontext on which a pointer was allocated or registered */ - CU_POINTER_ATTRIBUTE_MEMORY_TYPE = 2, /**< The ::CUmemorytype describing the physical location of a pointer */ - CU_POINTER_ATTRIBUTE_DEVICE_POINTER = 3, /**< The address at which a pointer's memory may be accessed on the device */ - CU_POINTER_ATTRIBUTE_HOST_POINTER = 4, /**< The address at which a pointer's memory may be accessed on the host */ - CU_POINTER_ATTRIBUTE_P2P_TOKENS = 5, /**< A pair of tokens for use with the nv-p2p.h Linux kernel interface */ - CU_POINTER_ATTRIBUTE_SYNC_MEMOPS = 6, /**< Synchronize every synchronous memory operation initiated on this region */ - CU_POINTER_ATTRIBUTE_BUFFER_ID = 7, /**< A process-wide unique ID for an allocated memory region*/ - CU_POINTER_ATTRIBUTE_IS_MANAGED = 8, /**< Indicates if the pointer points to managed memory */ - CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL = 9, /**< A device ordinal of a device on which a pointer was allocated or registered */ - CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE = 10, /**< 1 if this pointer maps to an allocation that is suitable for ::cudaIpcGetMemHandle, 0 otherwise **/ - CU_POINTER_ATTRIBUTE_RANGE_START_ADDR = 11, /**< Starting address for this requested pointer */ - CU_POINTER_ATTRIBUTE_RANGE_SIZE = 12, /**< Size of the address range for this requested pointer */ - CU_POINTER_ATTRIBUTE_MAPPED = 13, /**< 1 if this pointer is in a valid address range that is mapped to a backing allocation, 0 otherwise **/ - CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES = 14, /**< Bitmask of allowed ::CUmemAllocationHandleType for this allocation **/ - CU_POINTER_ATTRIBUTE_IS_GPU_DIRECT_RDMA_CAPABLE = 15, /**< 1 if the memory this pointer is referencing can be used with the GPUDirect RDMA API **/ - CU_POINTER_ATTRIBUTE_ACCESS_FLAGS = 16, /**< Returns the access flags the device associated with the current context has on the corresponding memory referenced by the pointer given */ - CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE = 17 /**< Returns the mempool handle for the allocation if it was allocated from a mempool. Otherwise returns NULL. **/ -} CUpointer_attribute; - -/** - * Function properties - */ -typedef enum CUfunction_attribute_enum { - /** - * The maximum number of threads per block, beyond which a launch of the - * function would fail. This number depends on both the function and the - * device on which the function is currently loaded. 
- */ - CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK = 0, - - /** - * The size in bytes of statically-allocated shared memory required by - * this function. This does not include dynamically-allocated shared - * memory requested by the user at runtime. - */ - CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES = 1, - - /** - * The size in bytes of user-allocated constant memory required by this - * function. - */ - CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES = 2, - - /** - * The size in bytes of local memory used by each thread of this function. - */ - CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES = 3, - - /** - * The number of registers used by each thread of this function. - */ - CU_FUNC_ATTRIBUTE_NUM_REGS = 4, - - /** - * The PTX virtual architecture version for which the function was - * compiled. This value is the major PTX version * 10 + the minor PTX - * version, so a PTX version 1.3 function would return the value 13. - * Note that this may return the undefined value of 0 for cubins - * compiled prior to CUDA 3.0. - */ - CU_FUNC_ATTRIBUTE_PTX_VERSION = 5, - - /** - * The binary architecture version for which the function was compiled. - * This value is the major binary version * 10 + the minor binary version, - * so a binary version 1.3 function would return the value 13. Note that - * this will return a value of 10 for legacy cubins that do not have a - * properly-encoded binary architecture version. - */ - CU_FUNC_ATTRIBUTE_BINARY_VERSION = 6, - - /** - * The attribute to indicate whether the function has been compiled with - * user specified option "-Xptxas --dlcm=ca" set . - */ - CU_FUNC_ATTRIBUTE_CACHE_MODE_CA = 7, - - /** - * The maximum size in bytes of dynamically-allocated shared memory that can be used by - * this function. If the user-specified dynamic shared memory size is larger than this - * value, the launch will fail. - * See ::cuFuncSetAttribute - */ - CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES = 8, - - /** - * On devices where the L1 cache and shared memory use the same hardware resources, - * this sets the shared memory carveout preference, in percent of the total shared memory. - * Refer to ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR. - * This is only a hint, and the driver can choose a different ratio if required to execute the function. - * See ::cuFuncSetAttribute - */ - CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT = 9, - - CU_FUNC_ATTRIBUTE_MAX -} CUfunction_attribute; - -/** - * Function cache configurations - */ -typedef enum CUfunc_cache_enum { - CU_FUNC_CACHE_PREFER_NONE = 0x00, /**< no preference for shared memory or L1 (default) */ - CU_FUNC_CACHE_PREFER_SHARED = 0x01, /**< prefer larger shared memory and smaller L1 cache */ - CU_FUNC_CACHE_PREFER_L1 = 0x02, /**< prefer larger L1 cache and smaller shared memory */ - CU_FUNC_CACHE_PREFER_EQUAL = 0x03 /**< prefer equal sized L1 cache and shared memory */ -} CUfunc_cache; - -/** - * Shared memory configurations - */ -typedef enum CUsharedconfig_enum { - CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE = 0x00, /**< set default shared memory bank size */ - CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE = 0x01, /**< set shared memory bank width to four bytes */ - CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE = 0x02 /**< set shared memory bank width to eight bytes */ -} CUsharedconfig; - -/** - * Shared memory carveout configurations. 
These may be passed to ::cuFuncSetAttribute - */ -typedef enum CUshared_carveout_enum { - CU_SHAREDMEM_CARVEOUT_DEFAULT = -1, /**< No preference for shared memory or L1 (default) */ - CU_SHAREDMEM_CARVEOUT_MAX_SHARED = 100, /**< Prefer maximum available shared memory, minimum L1 cache */ - CU_SHAREDMEM_CARVEOUT_MAX_L1 = 0 /**< Prefer maximum available L1 cache, minimum shared memory */ -} CUshared_carveout; - -/** - * Memory types - */ -typedef enum CUmemorytype_enum { - CU_MEMORYTYPE_HOST = 0x01, /**< Host memory */ - CU_MEMORYTYPE_DEVICE = 0x02, /**< Device memory */ - CU_MEMORYTYPE_ARRAY = 0x03, /**< Array memory */ - CU_MEMORYTYPE_UNIFIED = 0x04 /**< Unified device or host memory */ -} CUmemorytype; - -/** - * Compute Modes - */ -typedef enum CUcomputemode_enum { - CU_COMPUTEMODE_DEFAULT = 0, /**< Default compute mode (Multiple contexts allowed per device) */ - CU_COMPUTEMODE_PROHIBITED = 2, /**< Compute-prohibited mode (No contexts can be created on this device at this time) */ - CU_COMPUTEMODE_EXCLUSIVE_PROCESS = 3 /**< Compute-exclusive-process mode (Only one context used by a single process can be present on this device at a time) */ -} CUcomputemode; - -/** - * Memory advise values - */ -typedef enum CUmem_advise_enum { - CU_MEM_ADVISE_SET_READ_MOSTLY = 1, /**< Data will mostly be read and only occasionally be written to */ - CU_MEM_ADVISE_UNSET_READ_MOSTLY = 2, /**< Undo the effect of ::CU_MEM_ADVISE_SET_READ_MOSTLY */ - CU_MEM_ADVISE_SET_PREFERRED_LOCATION = 3, /**< Set the preferred location for the data as the specified device */ - CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION = 4, /**< Clear the preferred location for the data */ - CU_MEM_ADVISE_SET_ACCESSED_BY = 5, /**< Data will be accessed by the specified device, so prevent page faults as much as possible */ - CU_MEM_ADVISE_UNSET_ACCESSED_BY = 6 /**< Let the Unified Memory subsystem decide on the page faulting policy for the specified device */ -} CUmem_advise; - -typedef enum CUmem_range_attribute_enum { - CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY = 1, /**< Whether the range will mostly be read and only occasionally be written to */ - CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION = 2, /**< The preferred location of the range */ - CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY = 3, /**< Memory range has ::CU_MEM_ADVISE_SET_ACCESSED_BY set for specified device */ - CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION = 4 /**< The last location to which the range was prefetched */ -} CUmem_range_attribute; - -/** - * Online compiler and linker options - */ -typedef enum CUjit_option_enum -{ - /** - * Max number of registers that a thread may use.\n - * Option type: unsigned int\n - * Applies to: compiler only - */ - CU_JIT_MAX_REGISTERS = 0, - - /** - * IN: Specifies minimum number of threads per block to target compilation - * for\n - * OUT: Returns the number of threads the compiler actually targeted. - * This restricts the resource utilization of the compiler (e.g. max - * registers) such that a block with the given number of threads should be - * able to launch based on register limitations. 
Note, this option does not - * currently take into account any other resource limitations, such as - * shared memory utilization.\n - * Cannot be combined with ::CU_JIT_TARGET.\n - * Option type: unsigned int\n - * Applies to: compiler only - */ - CU_JIT_THREADS_PER_BLOCK, - - /** - * Overwrites the option value with the total wall clock time, in - * milliseconds, spent in the compiler and linker\n - * Option type: float\n - * Applies to: compiler and linker - */ - CU_JIT_WALL_TIME, - - /** - * Pointer to a buffer in which to print any log messages - * that are informational in nature (the buffer size is specified via - * option ::CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES)\n - * Option type: char *\n - * Applies to: compiler and linker - */ - CU_JIT_INFO_LOG_BUFFER, - - /** - * IN: Log buffer size in bytes. Log messages will be capped at this size - * (including null terminator)\n - * OUT: Amount of log buffer filled with messages\n - * Option type: unsigned int\n - * Applies to: compiler and linker - */ - CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES, - - /** - * Pointer to a buffer in which to print any log messages that - * reflect errors (the buffer size is specified via option - * ::CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES)\n - * Option type: char *\n - * Applies to: compiler and linker - */ - CU_JIT_ERROR_LOG_BUFFER, - - /** - * IN: Log buffer size in bytes. Log messages will be capped at this size - * (including null terminator)\n - * OUT: Amount of log buffer filled with messages\n - * Option type: unsigned int\n - * Applies to: compiler and linker - */ - CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES, - - /** - * Level of optimizations to apply to generated code (0 - 4), with 4 - * being the default and highest level of optimizations.\n - * Option type: unsigned int\n - * Applies to: compiler only - */ - CU_JIT_OPTIMIZATION_LEVEL, - - /** - * No option value required. Determines the target based on the current - * attached context (default)\n - * Option type: No option value needed\n - * Applies to: compiler and linker - */ - CU_JIT_TARGET_FROM_CUCONTEXT, - - /** - * Target is chosen based on supplied ::CUjit_target. Cannot be - * combined with ::CU_JIT_THREADS_PER_BLOCK.\n - * Option type: unsigned int for enumerated type ::CUjit_target\n - * Applies to: compiler and linker - */ - CU_JIT_TARGET, - - /** - * Specifies choice of fallback strategy if matching cubin is not found. - * Choice is based on supplied ::CUjit_fallback. 
This option cannot be - * used with cuLink* APIs as the linker requires exact matches.\n - * Option type: unsigned int for enumerated type ::CUjit_fallback\n - * Applies to: compiler only - */ - CU_JIT_FALLBACK_STRATEGY, - - /** - * Specifies whether to create debug information in output (-g) - * (0: false, default)\n - * Option type: int\n - * Applies to: compiler and linker - */ - CU_JIT_GENERATE_DEBUG_INFO, - - /** - * Generate verbose log messages (0: false, default)\n - * Option type: int\n - * Applies to: compiler and linker - */ - CU_JIT_LOG_VERBOSE, - - /** - * Generate line number information (-lineinfo) (0: false, default)\n - * Option type: int\n - * Applies to: compiler only - */ - CU_JIT_GENERATE_LINE_INFO, - - /** - * Specifies whether to enable caching explicitly (-dlcm) \n - * Choice is based on supplied ::CUjit_cacheMode_enum.\n - * Option type: unsigned int for enumerated type ::CUjit_cacheMode_enum\n - * Applies to: compiler only - */ - CU_JIT_CACHE_MODE, - - /** - * The below jit options are used for internal purposes only, in this version of CUDA - */ - CU_JIT_NEW_SM3X_OPT, - CU_JIT_FAST_COMPILE, - - /** - * Array of device symbol names that will be relocated to the corresponding - * host addresses stored in ::CU_JIT_GLOBAL_SYMBOL_ADDRESSES.\n - * Must contain ::CU_JIT_GLOBAL_SYMBOL_COUNT entries.\n - * When loading a device module, driver will relocate all encountered - * unresolved symbols to the host addresses.\n - * It is only allowed to register symbols that correspond to unresolved - * global variables.\n - * It is illegal to register the same device symbol at multiple addresses.\n - * Option type: const char **\n - * Applies to: dynamic linker only - */ - CU_JIT_GLOBAL_SYMBOL_NAMES, - - /** - * Array of host addresses that will be used to relocate corresponding - * device symbols stored in ::CU_JIT_GLOBAL_SYMBOL_NAMES.\n - * Must contain ::CU_JIT_GLOBAL_SYMBOL_COUNT entries.\n - * Option type: void **\n - * Applies to: dynamic linker only - */ - CU_JIT_GLOBAL_SYMBOL_ADDRESSES, - - /** - * Number of entries in ::CU_JIT_GLOBAL_SYMBOL_NAMES and - * ::CU_JIT_GLOBAL_SYMBOL_ADDRESSES arrays.\n - * Option type: unsigned int\n - * Applies to: dynamic linker only - */ - CU_JIT_GLOBAL_SYMBOL_COUNT, - - /** - * Enable link-time optimization (-dlto) for device code (0: false, default)\n - * Option type: int\n - * Applies to: compiler and linker - */ - CU_JIT_LTO, - - /** - * Control single-precision denormals (-ftz) support (0: false, default). - * 1 : flushes denormal values to zero - * 0 : preserves denormal values - * Option type: int\n - * Applies to: link-time optimization specified with CU_JIT_LTO - */ - CU_JIT_FTZ, - - /** - * Control single-precision floating-point division and reciprocals - * (-prec-div) support (1: true, default). - * 1 : Enables the IEEE round-to-nearest mode - * 0 : Enables the fast approximation mode - * Option type: int\n - * Applies to: link-time optimization specified with CU_JIT_LTO - */ - CU_JIT_PREC_DIV, - - /** - * Control single-precision floating-point square root - * (-prec-sqrt) support (1: true, default). - * 1 : Enables the IEEE round-to-nearest mode - * 0 : Enables the fast approximation mode - * Option type: int\n - * Applies to: link-time optimization specified with CU_JIT_LTO - */ - CU_JIT_PREC_SQRT, - - /** - * Enable/Disable the contraction of floating-point multiplies - * and adds/subtracts into floating-point multiply-add (-fma) - * operations (1: Enable, default; 0: Disable). 
- * Option type: int\n - * Applies to: link-time optimization specified with CU_JIT_LTO - */ - CU_JIT_FMA, - - CU_JIT_NUM_OPTIONS - -} CUjit_option; - -/** - * Online compilation targets - */ -typedef enum CUjit_target_enum -{ - CU_TARGET_COMPUTE_20 = 20, /**< Compute device class 2.0 */ - CU_TARGET_COMPUTE_21 = 21, /**< Compute device class 2.1 */ - CU_TARGET_COMPUTE_30 = 30, /**< Compute device class 3.0 */ - CU_TARGET_COMPUTE_32 = 32, /**< Compute device class 3.2 */ - CU_TARGET_COMPUTE_35 = 35, /**< Compute device class 3.5 */ - CU_TARGET_COMPUTE_37 = 37, /**< Compute device class 3.7 */ - CU_TARGET_COMPUTE_50 = 50, /**< Compute device class 5.0 */ - CU_TARGET_COMPUTE_52 = 52, /**< Compute device class 5.2 */ - CU_TARGET_COMPUTE_53 = 53, /**< Compute device class 5.3 */ - CU_TARGET_COMPUTE_60 = 60, /**< Compute device class 6.0.*/ - CU_TARGET_COMPUTE_61 = 61, /**< Compute device class 6.1.*/ - CU_TARGET_COMPUTE_62 = 62, /**< Compute device class 6.2.*/ - CU_TARGET_COMPUTE_70 = 70, /**< Compute device class 7.0.*/ - CU_TARGET_COMPUTE_72 = 72, /**< Compute device class 7.2.*/ - CU_TARGET_COMPUTE_75 = 75, /**< Compute device class 7.5.*/ - CU_TARGET_COMPUTE_80 = 80, /**< Compute device class 8.0.*/ - CU_TARGET_COMPUTE_86 = 86 /**< Compute device class 8.6.*/ -} CUjit_target; - -/** - * Cubin matching fallback strategies - */ -typedef enum CUjit_fallback_enum -{ - CU_PREFER_PTX = 0, /**< Prefer to compile ptx if exact binary match not found */ - - CU_PREFER_BINARY /**< Prefer to fall back to compatible binary code if exact match not found */ - -} CUjit_fallback; - -/** - * Caching modes for dlcm - */ -typedef enum CUjit_cacheMode_enum -{ - CU_JIT_CACHE_OPTION_NONE = 0, /**< Compile with no -dlcm flag specified */ - CU_JIT_CACHE_OPTION_CG, /**< Compile with L1 cache disabled */ - CU_JIT_CACHE_OPTION_CA /**< Compile with L1 cache enabled */ -} CUjit_cacheMode; - -/** - * Device code formats - */ -typedef enum CUjitInputType_enum -{ - /** - * Compiled device-class-specific device code\n - * Applicable options: none - */ - CU_JIT_INPUT_CUBIN = 0, - - /** - * PTX source code\n - * Applicable options: PTX compiler options - */ - CU_JIT_INPUT_PTX, - - /** - * Bundle of multiple cubins and/or PTX of some device code\n - * Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY - */ - CU_JIT_INPUT_FATBINARY, - - /** - * Host object with embedded device code\n - * Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY - */ - CU_JIT_INPUT_OBJECT, - - /** - * Archive of host objects with embedded device code\n - * Applicable options: PTX compiler options, ::CU_JIT_FALLBACK_STRATEGY - */ - CU_JIT_INPUT_LIBRARY, - - /** - * High-level intermediate code for link-time optimization\n - * Applicable options: NVVM compiler options, PTX compiler options - */ - CU_JIT_INPUT_NVVM, - - CU_JIT_NUM_INPUT_TYPES -} CUjitInputType; - -typedef struct CUlinkState_st *CUlinkState; - -/** - * Flags to register a graphics resource - */ -typedef enum CUgraphicsRegisterFlags_enum { - CU_GRAPHICS_REGISTER_FLAGS_NONE = 0x00, - CU_GRAPHICS_REGISTER_FLAGS_READ_ONLY = 0x01, - CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD = 0x02, - CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST = 0x04, - CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER = 0x08 -} CUgraphicsRegisterFlags; - -/** - * Flags for mapping and unmapping interop resources - */ -typedef enum CUgraphicsMapResourceFlags_enum { - CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE = 0x00, - CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY = 0x01, - 
CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD = 0x02 -} CUgraphicsMapResourceFlags; - -/** - * Array indices for cube faces - */ -typedef enum CUarray_cubemap_face_enum { - CU_CUBEMAP_FACE_POSITIVE_X = 0x00, /**< Positive X face of cubemap */ - CU_CUBEMAP_FACE_NEGATIVE_X = 0x01, /**< Negative X face of cubemap */ - CU_CUBEMAP_FACE_POSITIVE_Y = 0x02, /**< Positive Y face of cubemap */ - CU_CUBEMAP_FACE_NEGATIVE_Y = 0x03, /**< Negative Y face of cubemap */ - CU_CUBEMAP_FACE_POSITIVE_Z = 0x04, /**< Positive Z face of cubemap */ - CU_CUBEMAP_FACE_NEGATIVE_Z = 0x05 /**< Negative Z face of cubemap */ -} CUarray_cubemap_face; - -/** - * Limits - */ -typedef enum CUlimit_enum { - CU_LIMIT_STACK_SIZE = 0x00, /**< GPU thread stack size */ - CU_LIMIT_PRINTF_FIFO_SIZE = 0x01, /**< GPU printf FIFO size */ - CU_LIMIT_MALLOC_HEAP_SIZE = 0x02, /**< GPU malloc heap size */ - CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH = 0x03, /**< GPU device runtime launch synchronize depth */ - CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT = 0x04, /**< GPU device runtime pending launch count */ - CU_LIMIT_MAX_L2_FETCH_GRANULARITY = 0x05, /**< A value between 0 and 128 that indicates the maximum fetch granularity of L2 (in Bytes). This is a hint */ - CU_LIMIT_PERSISTING_L2_CACHE_SIZE = 0x06, /**< A size in bytes for L2 persisting lines cache size */ - CU_LIMIT_MAX -} CUlimit; - -/** - * Resource types - */ -typedef enum CUresourcetype_enum { - CU_RESOURCE_TYPE_ARRAY = 0x00, /**< Array resource */ - CU_RESOURCE_TYPE_MIPMAPPED_ARRAY = 0x01, /**< Mipmapped array resource */ - CU_RESOURCE_TYPE_LINEAR = 0x02, /**< Linear resource */ - CU_RESOURCE_TYPE_PITCH2D = 0x03 /**< Pitch 2D resource */ -} CUresourcetype; - -#ifdef _WIN32 -#define CUDA_CB __stdcall -#else -#define CUDA_CB -#endif - -/** - * CUDA host function - * \param userData Argument value passed to the function - */ -typedef void (CUDA_CB *CUhostFn)(void *userData); - -/** - * Specifies performance hint with ::CUaccessPolicyWindow for hitProp and missProp members. - */ -typedef enum CUaccessProperty_enum { - CU_ACCESS_PROPERTY_NORMAL = 0, /**< Normal cache persistence. */ - CU_ACCESS_PROPERTY_STREAMING = 1, /**< Streaming access is less likely to persit from cache. */ - CU_ACCESS_PROPERTY_PERSISTING = 2 /**< Persisting access is more likely to persist in cache.*/ -} CUaccessProperty; - -/** - * Specifies an access policy for a window, a contiguous extent of memory - * beginning at base_ptr and ending at base_ptr + num_bytes. - * num_bytes is limited by CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE. - * Partition into many segments and assign segments such that: - * sum of "hit segments" / window == approx. ratio. - * sum of "miss segments" / window == approx 1-ratio. - * Segments and ratio specifications are fitted to the capabilities of - * the architecture. - * Accesses in a hit segment apply the hitProp access policy. - * Accesses in a miss segment apply the missProp access policy. - */ -typedef struct CUaccessPolicyWindow_st { - void *base_ptr; /**< Starting address of the access policy window. CUDA driver may align it. */ - size_t num_bytes; /**< Size in bytes of the window policy. CUDA driver may restrict the maximum size and alignment. */ - float hitRatio; /**< hitRatio specifies percentage of lines assigned hitProp, rest are assigned missProp. */ - CUaccessProperty hitProp; /**< ::CUaccessProperty set for hit. */ - CUaccessProperty missProp; /**< ::CUaccessProperty set for miss. 
Must be either NORMAL or STREAMING */ -} CUaccessPolicyWindow_v1; -typedef CUaccessPolicyWindow_v1 CUaccessPolicyWindow; - -/** - * GPU kernel node parameters - */ -typedef struct CUDA_KERNEL_NODE_PARAMS_st { - CUfunction func; /**< Kernel to launch */ - unsigned int gridDimX; /**< Width of grid in blocks */ - unsigned int gridDimY; /**< Height of grid in blocks */ - unsigned int gridDimZ; /**< Depth of grid in blocks */ - unsigned int blockDimX; /**< X dimension of each thread block */ - unsigned int blockDimY; /**< Y dimension of each thread block */ - unsigned int blockDimZ; /**< Z dimension of each thread block */ - unsigned int sharedMemBytes; /**< Dynamic shared-memory size per thread block in bytes */ - void **kernelParams; /**< Array of pointers to kernel parameters */ - void **extra; /**< Extra options */ -} CUDA_KERNEL_NODE_PARAMS_v1; -typedef CUDA_KERNEL_NODE_PARAMS_v1 CUDA_KERNEL_NODE_PARAMS; - -/** - * Memset node parameters - */ -typedef struct CUDA_MEMSET_NODE_PARAMS_st { - CUdeviceptr dst; /**< Destination device pointer */ - size_t pitch; /**< Pitch of destination device pointer. Unused if height is 1 */ - unsigned int value; /**< Value to be set */ - unsigned int elementSize; /**< Size of each element in bytes. Must be 1, 2, or 4. */ - size_t width; /**< Width of the row in elements */ - size_t height; /**< Number of rows */ -} CUDA_MEMSET_NODE_PARAMS_v1; -typedef CUDA_MEMSET_NODE_PARAMS_v1 CUDA_MEMSET_NODE_PARAMS; - -/** - * Host node parameters - */ -typedef struct CUDA_HOST_NODE_PARAMS_st { - CUhostFn fn; /**< The function to call when the node executes */ - void* userData; /**< Argument to pass to the function */ -} CUDA_HOST_NODE_PARAMS_v1; -typedef CUDA_HOST_NODE_PARAMS_v1 CUDA_HOST_NODE_PARAMS; - -/** - * Graph node types - */ -typedef enum CUgraphNodeType_enum { - CU_GRAPH_NODE_TYPE_KERNEL = 0, /**< GPU kernel node */ - CU_GRAPH_NODE_TYPE_MEMCPY = 1, /**< Memcpy node */ - CU_GRAPH_NODE_TYPE_MEMSET = 2, /**< Memset node */ - CU_GRAPH_NODE_TYPE_HOST = 3, /**< Host (executable) node */ - CU_GRAPH_NODE_TYPE_GRAPH = 4, /**< Node which executes an embedded graph */ - CU_GRAPH_NODE_TYPE_EMPTY = 5, /**< Empty (no-op) node */ - CU_GRAPH_NODE_TYPE_WAIT_EVENT = 6, /**< External event wait node */ - CU_GRAPH_NODE_TYPE_EVENT_RECORD = 7, /**< External event record node */ - CU_GRAPH_NODE_TYPE_EXT_SEMAS_SIGNAL = 8, /**< External semaphore signal node */ - CU_GRAPH_NODE_TYPE_EXT_SEMAS_WAIT = 9, /**< External semaphore wait node */ - CU_GRAPH_NODE_TYPE_MEM_ALLOC = 10,/**< Memory Allocation Node */ - CU_GRAPH_NODE_TYPE_MEM_FREE = 11 /**< Memory Free Node */ -} CUgraphNodeType; - -typedef enum CUsynchronizationPolicy_enum { - CU_SYNC_POLICY_AUTO = 1, - CU_SYNC_POLICY_SPIN = 2, - CU_SYNC_POLICY_YIELD = 3, - CU_SYNC_POLICY_BLOCKING_SYNC = 4 -} CUsynchronizationPolicy; - -/** - * Graph kernel node Attributes - */ -typedef enum CUkernelNodeAttrID_enum { - CU_KERNEL_NODE_ATTRIBUTE_ACCESS_POLICY_WINDOW = 1, /**< Identifier for ::CUkernelNodeAttrValue::accessPolicyWindow. */ - CU_KERNEL_NODE_ATTRIBUTE_COOPERATIVE = 2 /**< Allows a kernel node to be cooperative (see ::cuLaunchCooperativeKernel). */ -} CUkernelNodeAttrID; - -/** - * Graph kernel node attributes union, used with ::cuKernelNodeSetAttribute/::cuKernelNodeGetAttribute - */ -typedef union CUkernelNodeAttrValue_union { - CUaccessPolicyWindow accessPolicyWindow; /**< Attribute ::CUaccessPolicyWindow. */ - int cooperative; /**< Nonzero indicates a cooperative kernel (see ::cuLaunchCooperativeKernel). 
*/ -} CUkernelNodeAttrValue_v1; -typedef CUkernelNodeAttrValue_v1 CUkernelNodeAttrValue; - -/** - * Possible stream capture statuses returned by ::cuStreamIsCapturing - */ -typedef enum CUstreamCaptureStatus_enum { - CU_STREAM_CAPTURE_STATUS_NONE = 0, /**< Stream is not capturing */ - CU_STREAM_CAPTURE_STATUS_ACTIVE = 1, /**< Stream is actively capturing */ - CU_STREAM_CAPTURE_STATUS_INVALIDATED = 2 /**< Stream is part of a capture sequence that - has been invalidated, but not terminated */ -} CUstreamCaptureStatus; - -/** - * Possible modes for stream capture thread interactions. For more details see - * ::cuStreamBeginCapture and ::cuThreadExchangeStreamCaptureMode - */ -typedef enum CUstreamCaptureMode_enum { - CU_STREAM_CAPTURE_MODE_GLOBAL = 0, - CU_STREAM_CAPTURE_MODE_THREAD_LOCAL = 1, - CU_STREAM_CAPTURE_MODE_RELAXED = 2 -} CUstreamCaptureMode; - -/** - * Stream Attributes - */ -typedef enum CUstreamAttrID_enum { - CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW = 1, /**< Identifier for ::CUstreamAttrValue::accessPolicyWindow. */ - CU_STREAM_ATTRIBUTE_SYNCHRONIZATION_POLICY = 3 /**< ::CUsynchronizationPolicy for work queued up in this stream */ -} CUstreamAttrID; - -/** - * Stream attributes union, used with ::cuStreamSetAttribute/::cuStreamGetAttribute - */ -typedef union CUstreamAttrValue_union { - CUaccessPolicyWindow accessPolicyWindow; /**< Attribute ::CUaccessPolicyWindow. */ - CUsynchronizationPolicy syncPolicy; /**< Value for ::CU_STREAM_ATTRIBUTE_SYNCHRONIZATION_POLICY. */ -} CUstreamAttrValue_v1; -typedef CUstreamAttrValue_v1 CUstreamAttrValue; - -/** - * Flags to specify search options. For more details see ::cuGetProcAddress - */ -typedef enum CUdriverProcAddress_flags_enum { - CU_GET_PROC_ADDRESS_DEFAULT = 0, /**< Default search mode for driver symbols. */ - CU_GET_PROC_ADDRESS_LEGACY_STREAM = 1 << 0, /**< Search for legacy versions of driver symbols. */ - CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM = 1 << 1 /**< Search for per-thread versions of driver symbols. */ -} CUdriverProcAddress_flags; - -/** - * Execution Affinity Types - */ -typedef enum CUexecAffinityType_enum { - CU_EXEC_AFFINITY_TYPE_SM_COUNT = 0, /**< Create a context with limited SMs. */ - CU_EXEC_AFFINITY_TYPE_MAX -} CUexecAffinityType; - -/** - * Value for ::CU_EXEC_AFFINITY_TYPE_SM_COUNT - */ -typedef struct CUexecAffinitySmCount_st { - unsigned int val; /**< The number of SMs the context is limited to use. */ -} CUexecAffinitySmCount_v1; -typedef CUexecAffinitySmCount_v1 CUexecAffinitySmCount; - -/** - * Execution Affinity Parameters - */ -typedef struct CUexecAffinityParam_st { - CUexecAffinityType type; - union { - CUexecAffinitySmCount smCount; /** Value for ::CU_EXEC_AFFINITY_TYPE_SM_COUNT */ - } param; -} CUexecAffinityParam_v1; -typedef CUexecAffinityParam_v1 CUexecAffinityParam; - -/** - * Error codes - */ -typedef enum cudaError_enum { - /** - * The API call returned with no errors. In the case of query calls, this - * also means that the operation being queried is complete (see - * ::cuEventQuery() and ::cuStreamQuery()). - */ - CUDA_SUCCESS = 0, - - /** - * This indicates that one or more of the parameters passed to the API call - * is not within an acceptable range of values. - */ - CUDA_ERROR_INVALID_VALUE = 1, - - /** - * The API call failed because it was unable to allocate enough memory to - * perform the requested operation. - */ - CUDA_ERROR_OUT_OF_MEMORY = 2, - - /** - * This indicates that the CUDA driver has not been initialized with - * ::cuInit() or that initialization has failed. 
- */ - CUDA_ERROR_NOT_INITIALIZED = 3, - - /** - * This indicates that the CUDA driver is in the process of shutting down. - */ - CUDA_ERROR_DEINITIALIZED = 4, - - /** - * This indicates profiler is not initialized for this run. This can - * happen when the application is running with external profiling tools - * like visual profiler. - */ - CUDA_ERROR_PROFILER_DISABLED = 5, - - /** - * \deprecated - * This error return is deprecated as of CUDA 5.0. It is no longer an error - * to attempt to enable/disable the profiling via ::cuProfilerStart or - * ::cuProfilerStop without initialization. - */ - CUDA_ERROR_PROFILER_NOT_INITIALIZED = 6, - - /** - * \deprecated - * This error return is deprecated as of CUDA 5.0. It is no longer an error - * to call cuProfilerStart() when profiling is already enabled. - */ - CUDA_ERROR_PROFILER_ALREADY_STARTED = 7, - - /** - * \deprecated - * This error return is deprecated as of CUDA 5.0. It is no longer an error - * to call cuProfilerStop() when profiling is already disabled. - */ - CUDA_ERROR_PROFILER_ALREADY_STOPPED = 8, - - /** - * This indicates that the CUDA driver that the application has loaded is a - * stub library. Applications that run with the stub rather than a real - * driver loaded will result in CUDA API returning this error. - */ - CUDA_ERROR_STUB_LIBRARY = 34, - - /** - * This indicates that no CUDA-capable devices were detected by the installed - * CUDA driver. - */ - CUDA_ERROR_NO_DEVICE = 100, - - /** - * This indicates that the device ordinal supplied by the user does not - * correspond to a valid CUDA device or that the action requested is - * invalid for the specified device. - */ - CUDA_ERROR_INVALID_DEVICE = 101, - - /** - * This error indicates that the Grid license is not applied. - */ - CUDA_ERROR_DEVICE_NOT_LICENSED = 102, - - /** - * This indicates that the device kernel image is invalid. This can also - * indicate an invalid CUDA module. - */ - CUDA_ERROR_INVALID_IMAGE = 200, - - /** - * This most frequently indicates that there is no context bound to the - * current thread. This can also be returned if the context passed to an - * API call is not a valid handle (such as a context that has had - * ::cuCtxDestroy() invoked on it). This can also be returned if a user - * mixes different API versions (i.e. 3010 context with 3020 API calls). - * See ::cuCtxGetApiVersion() for more details. - */ - CUDA_ERROR_INVALID_CONTEXT = 201, - - /** - * This indicated that the context being supplied as a parameter to the - * API call was already the active context. - * \deprecated - * This error return is deprecated as of CUDA 3.2. It is no longer an - * error to attempt to push the active context via ::cuCtxPushCurrent(). - */ - CUDA_ERROR_CONTEXT_ALREADY_CURRENT = 202, - - /** - * This indicates that a map or register operation has failed. - */ - CUDA_ERROR_MAP_FAILED = 205, - - /** - * This indicates that an unmap or unregister operation has failed. - */ - CUDA_ERROR_UNMAP_FAILED = 206, - - /** - * This indicates that the specified array is currently mapped and thus - * cannot be destroyed. - */ - CUDA_ERROR_ARRAY_IS_MAPPED = 207, - - /** - * This indicates that the resource is already mapped. - */ - CUDA_ERROR_ALREADY_MAPPED = 208, - - /** - * This indicates that there is no kernel image available that is suitable - * for the device. This can occur when a user specifies code generation - * options for a particular CUDA source file that do not include the - * corresponding device configuration. 
- */ - CUDA_ERROR_NO_BINARY_FOR_GPU = 209, - - /** - * This indicates that a resource has already been acquired. - */ - CUDA_ERROR_ALREADY_ACQUIRED = 210, - - /** - * This indicates that a resource is not mapped. - */ - CUDA_ERROR_NOT_MAPPED = 211, - - /** - * This indicates that a mapped resource is not available for access as an - * array. - */ - CUDA_ERROR_NOT_MAPPED_AS_ARRAY = 212, - - /** - * This indicates that a mapped resource is not available for access as a - * pointer. - */ - CUDA_ERROR_NOT_MAPPED_AS_POINTER = 213, - - /** - * This indicates that an uncorrectable ECC error was detected during - * execution. - */ - CUDA_ERROR_ECC_UNCORRECTABLE = 214, - - /** - * This indicates that the ::CUlimit passed to the API call is not - * supported by the active device. - */ - CUDA_ERROR_UNSUPPORTED_LIMIT = 215, - - /** - * This indicates that the ::CUcontext passed to the API call can - * only be bound to a single CPU thread at a time but is already - * bound to a CPU thread. - */ - CUDA_ERROR_CONTEXT_ALREADY_IN_USE = 216, - - /** - * This indicates that peer access is not supported across the given - * devices. - */ - CUDA_ERROR_PEER_ACCESS_UNSUPPORTED = 217, - - /** - * This indicates that a PTX JIT compilation failed. - */ - CUDA_ERROR_INVALID_PTX = 218, - - /** - * This indicates an error with OpenGL or DirectX context. - */ - CUDA_ERROR_INVALID_GRAPHICS_CONTEXT = 219, - - /** - * This indicates that an uncorrectable NVLink error was detected during the - * execution. - */ - CUDA_ERROR_NVLINK_UNCORRECTABLE = 220, - - /** - * This indicates that the PTX JIT compiler library was not found. - */ - CUDA_ERROR_JIT_COMPILER_NOT_FOUND = 221, - - /** - * This indicates that the provided PTX was compiled with an unsupported toolchain. - */ - - CUDA_ERROR_UNSUPPORTED_PTX_VERSION = 222, - - /** - * This indicates that the PTX JIT compilation was disabled. - */ - CUDA_ERROR_JIT_COMPILATION_DISABLED = 223, - - /** - * This indicates that the ::CUexecAffinityType passed to the API call is not - * supported by the active device. - */ - CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY = 224, - - /** - * This indicates that the device kernel source is invalid. - */ - CUDA_ERROR_INVALID_SOURCE = 300, - - /** - * This indicates that the file specified was not found. - */ - CUDA_ERROR_FILE_NOT_FOUND = 301, - - /** - * This indicates that a link to a shared object failed to resolve. - */ - CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND = 302, - - /** - * This indicates that initialization of a shared object failed. - */ - CUDA_ERROR_SHARED_OBJECT_INIT_FAILED = 303, - - /** - * This indicates that an OS call failed. - */ - CUDA_ERROR_OPERATING_SYSTEM = 304, - - /** - * This indicates that a resource handle passed to the API call was not - * valid. Resource handles are opaque types like ::CUstream and ::CUevent. - */ - CUDA_ERROR_INVALID_HANDLE = 400, - - /** - * This indicates that a resource required by the API call is not in a - * valid state to perform the requested operation. - */ - CUDA_ERROR_ILLEGAL_STATE = 401, - - /** - * This indicates that a named symbol was not found. Examples of symbols - * are global/constant variable names, driver function names, texture names, - * and surface names. - */ - CUDA_ERROR_NOT_FOUND = 500, - - /** - * This indicates that asynchronous operations issued previously have not - * completed yet. This result is not actually an error, but must be indicated - * differently than ::CUDA_SUCCESS (which indicates completion). 
Calls that - * may return this value include ::cuEventQuery() and ::cuStreamQuery(). - */ - CUDA_ERROR_NOT_READY = 600, - - /** - * While executing a kernel, the device encountered a - * load or store instruction on an invalid memory address. - * This leaves the process in an inconsistent state and any further CUDA work - * will return the same error. To continue using CUDA, the process must be terminated - * and relaunched. - */ - CUDA_ERROR_ILLEGAL_ADDRESS = 700, - - /** - * This indicates that a launch did not occur because it did not have - * appropriate resources. This error usually indicates that the user has - * attempted to pass too many arguments to the device kernel, or the - * kernel launch specifies too many threads for the kernel's register - * count. Passing arguments of the wrong size (i.e. a 64-bit pointer - * when a 32-bit int is expected) is equivalent to passing too many - * arguments and can also result in this error. - */ - CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES = 701, - - /** - * This indicates that the device kernel took too long to execute. This can - * only occur if timeouts are enabled - see the device attribute - * ::CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT for more information. - * This leaves the process in an inconsistent state and any further CUDA work - * will return the same error. To continue using CUDA, the process must be terminated - * and relaunched. - */ - CUDA_ERROR_LAUNCH_TIMEOUT = 702, - - /** - * This error indicates a kernel launch that uses an incompatible texturing - * mode. - */ - CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING = 703, - - /** - * This error indicates that a call to ::cuCtxEnablePeerAccess() is - * trying to re-enable peer access to a context which has already - * had peer access to it enabled. - */ - CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED = 704, - - /** - * This error indicates that ::cuCtxDisablePeerAccess() is - * trying to disable peer access which has not been enabled yet - * via ::cuCtxEnablePeerAccess(). - */ - CUDA_ERROR_PEER_ACCESS_NOT_ENABLED = 705, - - /** - * This error indicates that the primary context for the specified device - * has already been initialized. - */ - CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE = 708, - - /** - * This error indicates that the context current to the calling thread - * has been destroyed using ::cuCtxDestroy, or is a primary context which - * has not yet been initialized. - */ - CUDA_ERROR_CONTEXT_IS_DESTROYED = 709, - - /** - * A device-side assert triggered during kernel execution. The context - * cannot be used anymore, and must be destroyed. All existing device - * memory allocations from this context are invalid and must be - * reconstructed if the program is to continue using CUDA. - */ - CUDA_ERROR_ASSERT = 710, - - /** - * This error indicates that the hardware resources required to enable - * peer access have been exhausted for one or more of the devices - * passed to ::cuCtxEnablePeerAccess(). - */ - CUDA_ERROR_TOO_MANY_PEERS = 711, - - /** - * This error indicates that the memory range passed to ::cuMemHostRegister() - * has already been registered. - */ - CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED = 712, - - /** - * This error indicates that the pointer passed to ::cuMemHostUnregister() - * does not correspond to any currently registered memory region. - */ - CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED = 713, - - /** - * While executing a kernel, the device encountered a stack error. - * This can be due to stack corruption or exceeding the stack size limit. 
- * This leaves the process in an inconsistent state and any further CUDA work - * will return the same error. To continue using CUDA, the process must be terminated - * and relaunched. - */ - CUDA_ERROR_HARDWARE_STACK_ERROR = 714, - - /** - * While executing a kernel, the device encountered an illegal instruction. - * This leaves the process in an inconsistent state and any further CUDA work - * will return the same error. To continue using CUDA, the process must be terminated - * and relaunched. - */ - CUDA_ERROR_ILLEGAL_INSTRUCTION = 715, - - /** - * While executing a kernel, the device encountered a load or store instruction - * on a memory address which is not aligned. - * This leaves the process in an inconsistent state and any further CUDA work - * will return the same error. To continue using CUDA, the process must be terminated - * and relaunched. - */ - CUDA_ERROR_MISALIGNED_ADDRESS = 716, - - /** - * While executing a kernel, the device encountered an instruction - * which can only operate on memory locations in certain address spaces - * (global, shared, or local), but was supplied a memory address not - * belonging to an allowed address space. - * This leaves the process in an inconsistent state and any further CUDA work - * will return the same error. To continue using CUDA, the process must be terminated - * and relaunched. - */ - CUDA_ERROR_INVALID_ADDRESS_SPACE = 717, - - /** - * While executing a kernel, the device program counter wrapped its address space. - * This leaves the process in an inconsistent state and any further CUDA work - * will return the same error. To continue using CUDA, the process must be terminated - * and relaunched. - */ - CUDA_ERROR_INVALID_PC = 718, - - /** - * An exception occurred on the device while executing a kernel. Common - * causes include dereferencing an invalid device pointer and accessing - * out of bounds shared memory. Less common cases can be system specific - more - * information about these cases can be found in the system specific user guide. - * This leaves the process in an inconsistent state and any further CUDA work - * will return the same error. To continue using CUDA, the process must be terminated - * and relaunched. - */ - CUDA_ERROR_LAUNCH_FAILED = 719, - - /** - * This error indicates that the number of blocks launched per grid for a kernel that was - * launched via either ::cuLaunchCooperativeKernel or ::cuLaunchCooperativeKernelMultiDevice - * exceeds the maximum number of blocks as allowed by ::cuOccupancyMaxActiveBlocksPerMultiprocessor - * or ::cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags times the number of multiprocessors - * as specified by the device attribute ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT. - */ - CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE = 720, - - /** - * This error indicates that the attempted operation is not permitted. - */ - CUDA_ERROR_NOT_PERMITTED = 800, - - /** - * This error indicates that the attempted operation is not supported - * on the current system or device. - */ - CUDA_ERROR_NOT_SUPPORTED = 801, - - /** - * This error indicates that the system is not yet ready to start any CUDA - * work. To continue using CUDA, verify the system configuration is in a - * valid state and all required driver daemons are actively running. - * More information about this error can be found in the system specific - * user guide. - */ - CUDA_ERROR_SYSTEM_NOT_READY = 802, - - /** - * This error indicates that there is a mismatch between the versions of - * the display driver and the CUDA driver. 
Refer to the compatibility documentation - * for supported versions. - */ - CUDA_ERROR_SYSTEM_DRIVER_MISMATCH = 803, - - /** - * This error indicates that the system was upgraded to run with forward compatibility - * but the visible hardware detected by CUDA does not support this configuration. - * Refer to the compatibility documentation for the supported hardware matrix or ensure - * that only supported hardware is visible during initialization via the CUDA_VISIBLE_DEVICES - * environment variable. - */ - CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE = 804, - - /** - * This error indicates that the MPS client failed to connect to the MPS control daemon or the MPS server. - */ - CUDA_ERROR_MPS_CONNECTION_FAILED = 805, - - /** - * This error indicates that the remote procedural call between the MPS server and the MPS client failed. - */ - CUDA_ERROR_MPS_RPC_FAILURE = 806, - - /** - * This error indicates that the MPS server is not ready to accept new MPS client requests. - * This error can be returned when the MPS server is in the process of recovering from a fatal failure. - */ - CUDA_ERROR_MPS_SERVER_NOT_READY = 807, - - /** - * This error indicates that the hardware resources required to create MPS client have been exhausted. - */ - CUDA_ERROR_MPS_MAX_CLIENTS_REACHED = 808, - - /** - * This error indicates the the hardware resources required to support device connections have been exhausted. - */ - CUDA_ERROR_MPS_MAX_CONNECTIONS_REACHED = 809, - - /** - * This error indicates that the operation is not permitted when - * the stream is capturing. - */ - CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED = 900, - - /** - * This error indicates that the current capture sequence on the stream - * has been invalidated due to a previous error. - */ - CUDA_ERROR_STREAM_CAPTURE_INVALIDATED = 901, - - /** - * This error indicates that the operation would have resulted in a merge - * of two independent capture sequences. - */ - CUDA_ERROR_STREAM_CAPTURE_MERGE = 902, - - /** - * This error indicates that the capture was not initiated in this stream. - */ - CUDA_ERROR_STREAM_CAPTURE_UNMATCHED = 903, - - /** - * This error indicates that the capture sequence contains a fork that was - * not joined to the primary stream. - */ - CUDA_ERROR_STREAM_CAPTURE_UNJOINED = 904, - - /** - * This error indicates that a dependency would have been created which - * crosses the capture sequence boundary. Only implicit in-stream ordering - * dependencies are allowed to cross the boundary. - */ - CUDA_ERROR_STREAM_CAPTURE_ISOLATION = 905, - - /** - * This error indicates a disallowed implicit dependency on a current capture - * sequence from cudaStreamLegacy. - */ - CUDA_ERROR_STREAM_CAPTURE_IMPLICIT = 906, - - /** - * This error indicates that the operation is not permitted on an event which - * was last recorded in a capturing stream. - */ - CUDA_ERROR_CAPTURED_EVENT = 907, - - /** - * A stream capture sequence not initiated with the ::CU_STREAM_CAPTURE_MODE_RELAXED - * argument to ::cuStreamBeginCapture was passed to ::cuStreamEndCapture in a - * different thread. - */ - CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD = 908, - - /** - * This error indicates that the timeout specified for the wait operation has lapsed. - */ - CUDA_ERROR_TIMEOUT = 909, - - /** - * This error indicates that the graph update was not performed because it included - * changes which violated constraints specific to instantiated graph update. 
- */ - CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE = 910, - - /** - * This indicates that an async error has occurred in a device outside of CUDA. - * If CUDA was waiting for an external device's signal before consuming shared data, - * the external device signaled an error indicating that the data is not valid for - * consumption. This leaves the process in an inconsistent state and any further CUDA - * work will return the same error. To continue using CUDA, the process must be - * terminated and relaunched. - */ - CUDA_ERROR_EXTERNAL_DEVICE = 911, - - /** - * This indicates that an unknown internal error has occurred. - */ - CUDA_ERROR_UNKNOWN = 999 -} CUresult; - -/** - * P2P Attributes - */ -typedef enum CUdevice_P2PAttribute_enum { - CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK = 0x01, /**< A relative value indicating the performance of the link between two devices */ - CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED = 0x02, /**< P2P Access is enable */ - CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED = 0x03, /**< Atomic operation over the link supported */ - CU_DEVICE_P2P_ATTRIBUTE_ACCESS_ACCESS_SUPPORTED = 0x04, /**< \deprecated use CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED instead */ - CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED = 0x04 /**< Accessing CUDA arrays over the link supported */ -} CUdevice_P2PAttribute; - -/** - * CUDA stream callback - * \param hStream The stream the callback was added to, as passed to ::cuStreamAddCallback. May be NULL. - * \param status ::CUDA_SUCCESS or any persistent error on the stream. - * \param userData User parameter provided at registration. - */ -typedef void (CUDA_CB *CUstreamCallback)(CUstream hStream, CUresult status, void *userData); - -/** - * Block size to per-block dynamic shared memory mapping for a certain - * kernel \param blockSize Block size of the kernel. - * - * \return The dynamic shared memory needed by a block. - */ -typedef size_t (CUDA_CB *CUoccupancyB2DSize)(int blockSize); - -/** - * If set, host memory is portable between CUDA contexts. - * Flag for ::cuMemHostAlloc() - */ -#define CU_MEMHOSTALLOC_PORTABLE 0x01 - -/** - * If set, host memory is mapped into CUDA address space and - * ::cuMemHostGetDevicePointer() may be called on the host pointer. - * Flag for ::cuMemHostAlloc() - */ -#define CU_MEMHOSTALLOC_DEVICEMAP 0x02 - -/** - * If set, host memory is allocated as write-combined - fast to write, - * faster to DMA, slow to read except via SSE4 streaming load instruction - * (MOVNTDQA). - * Flag for ::cuMemHostAlloc() - */ -#define CU_MEMHOSTALLOC_WRITECOMBINED 0x04 - -/** - * If set, host memory is portable between CUDA contexts. - * Flag for ::cuMemHostRegister() - */ -#define CU_MEMHOSTREGISTER_PORTABLE 0x01 - -/** - * If set, host memory is mapped into CUDA address space and - * ::cuMemHostGetDevicePointer() may be called on the host pointer. - * Flag for ::cuMemHostRegister() - */ -#define CU_MEMHOSTREGISTER_DEVICEMAP 0x02 - -/** - * If set, the passed memory pointer is treated as pointing to some - * memory-mapped I/O space, e.g. belonging to a third-party PCIe device. - * On Windows the flag is a no-op. - * On Linux that memory is marked as non cache-coherent for the GPU and - * is expected to be physically contiguous. It may return - * CUDA_ERROR_NOT_PERMITTED if run as an unprivileged user, - * CUDA_ERROR_NOT_SUPPORTED on older Linux kernel versions. - * On all other platforms, it is not supported and CUDA_ERROR_NOT_SUPPORTED - * is returned. 
- * Flag for ::cuMemHostRegister() - */ -#define CU_MEMHOSTREGISTER_IOMEMORY 0x04 - -/** -* If set, the passed memory pointer is treated as pointing to memory that is -* considered read-only by the device. On platforms without -* CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, this flag is -* required in order to register memory mapped to the CPU as read-only. Support -* for the use of this flag can be queried from the device attribute -* CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED. Using this flag with -* a current context associated with a device that does not have this attribute -* set will cause ::cuMemHostRegister to error with CUDA_ERROR_NOT_SUPPORTED. -*/ -#define CU_MEMHOSTREGISTER_READ_ONLY 0x08 - -/** - * 2D memory copy parameters - */ -typedef struct CUDA_MEMCPY2D_st { - size_t srcXInBytes; /**< Source X in bytes */ - size_t srcY; /**< Source Y */ - - CUmemorytype srcMemoryType; /**< Source memory type (host, device, array) */ - const void *srcHost; /**< Source host pointer */ - CUdeviceptr srcDevice; /**< Source device pointer */ - CUarray srcArray; /**< Source array reference */ - size_t srcPitch; /**< Source pitch (ignored when src is array) */ - - size_t dstXInBytes; /**< Destination X in bytes */ - size_t dstY; /**< Destination Y */ - - CUmemorytype dstMemoryType; /**< Destination memory type (host, device, array) */ - void *dstHost; /**< Destination host pointer */ - CUdeviceptr dstDevice; /**< Destination device pointer */ - CUarray dstArray; /**< Destination array reference */ - size_t dstPitch; /**< Destination pitch (ignored when dst is array) */ - - size_t WidthInBytes; /**< Width of 2D memory copy in bytes */ - size_t Height; /**< Height of 2D memory copy */ -} CUDA_MEMCPY2D_v2; -typedef CUDA_MEMCPY2D_v2 CUDA_MEMCPY2D; - -/** - * 3D memory copy parameters - */ -typedef struct CUDA_MEMCPY3D_st { - size_t srcXInBytes; /**< Source X in bytes */ - size_t srcY; /**< Source Y */ - size_t srcZ; /**< Source Z */ - size_t srcLOD; /**< Source LOD */ - CUmemorytype srcMemoryType; /**< Source memory type (host, device, array) */ - const void *srcHost; /**< Source host pointer */ - CUdeviceptr srcDevice; /**< Source device pointer */ - CUarray srcArray; /**< Source array reference */ - void *reserved0; /**< Must be NULL */ - size_t srcPitch; /**< Source pitch (ignored when src is array) */ - size_t srcHeight; /**< Source height (ignored when src is array; may be 0 if Depth==1) */ - - size_t dstXInBytes; /**< Destination X in bytes */ - size_t dstY; /**< Destination Y */ - size_t dstZ; /**< Destination Z */ - size_t dstLOD; /**< Destination LOD */ - CUmemorytype dstMemoryType; /**< Destination memory type (host, device, array) */ - void *dstHost; /**< Destination host pointer */ - CUdeviceptr dstDevice; /**< Destination device pointer */ - CUarray dstArray; /**< Destination array reference */ - void *reserved1; /**< Must be NULL */ - size_t dstPitch; /**< Destination pitch (ignored when dst is array) */ - size_t dstHeight; /**< Destination height (ignored when dst is array; may be 0 if Depth==1) */ - - size_t WidthInBytes; /**< Width of 3D memory copy in bytes */ - size_t Height; /**< Height of 3D memory copy */ - size_t Depth; /**< Depth of 3D memory copy */ -} CUDA_MEMCPY3D_v2; -typedef CUDA_MEMCPY3D_v2 CUDA_MEMCPY3D; - -/** - * 3D memory cross-context copy parameters - */ -typedef struct CUDA_MEMCPY3D_PEER_st { - size_t srcXInBytes; /**< Source X in bytes */ - size_t srcY; /**< Source Y */ - size_t srcZ; /**< Source Z */ - size_t srcLOD; /**< Source 
LOD */ - CUmemorytype srcMemoryType; /**< Source memory type (host, device, array) */ - const void *srcHost; /**< Source host pointer */ - CUdeviceptr srcDevice; /**< Source device pointer */ - CUarray srcArray; /**< Source array reference */ - CUcontext srcContext; /**< Source context (ignored with srcMemoryType is ::CU_MEMORYTYPE_ARRAY) */ - size_t srcPitch; /**< Source pitch (ignored when src is array) */ - size_t srcHeight; /**< Source height (ignored when src is array; may be 0 if Depth==1) */ - - size_t dstXInBytes; /**< Destination X in bytes */ - size_t dstY; /**< Destination Y */ - size_t dstZ; /**< Destination Z */ - size_t dstLOD; /**< Destination LOD */ - CUmemorytype dstMemoryType; /**< Destination memory type (host, device, array) */ - void *dstHost; /**< Destination host pointer */ - CUdeviceptr dstDevice; /**< Destination device pointer */ - CUarray dstArray; /**< Destination array reference */ - CUcontext dstContext; /**< Destination context (ignored with dstMemoryType is ::CU_MEMORYTYPE_ARRAY) */ - size_t dstPitch; /**< Destination pitch (ignored when dst is array) */ - size_t dstHeight; /**< Destination height (ignored when dst is array; may be 0 if Depth==1) */ - - size_t WidthInBytes; /**< Width of 3D memory copy in bytes */ - size_t Height; /**< Height of 3D memory copy */ - size_t Depth; /**< Depth of 3D memory copy */ -} CUDA_MEMCPY3D_PEER_v1; -typedef CUDA_MEMCPY3D_PEER_v1 CUDA_MEMCPY3D_PEER; - -/** - * Array descriptor - */ -typedef struct CUDA_ARRAY_DESCRIPTOR_st -{ - size_t Width; /**< Width of array */ - size_t Height; /**< Height of array */ - - CUarray_format Format; /**< Array format */ - unsigned int NumChannels; /**< Channels per array element */ -} CUDA_ARRAY_DESCRIPTOR_v2; -typedef CUDA_ARRAY_DESCRIPTOR_v2 CUDA_ARRAY_DESCRIPTOR; - -/** - * 3D array descriptor - */ -typedef struct CUDA_ARRAY3D_DESCRIPTOR_st -{ - size_t Width; /**< Width of 3D array */ - size_t Height; /**< Height of 3D array */ - size_t Depth; /**< Depth of 3D array */ - - CUarray_format Format; /**< Array format */ - unsigned int NumChannels; /**< Channels per array element */ - unsigned int Flags; /**< Flags */ -} CUDA_ARRAY3D_DESCRIPTOR_v2; -typedef CUDA_ARRAY3D_DESCRIPTOR_v2 CUDA_ARRAY3D_DESCRIPTOR; - -/** - * Indicates that the layered sparse CUDA array or CUDA mipmapped array has a single mip tail region for all layers - */ -#define CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL 0x1 - -/** - * CUDA array sparse properties - */ -typedef struct CUDA_ARRAY_SPARSE_PROPERTIES_st { - struct { - unsigned int width; /**< Width of sparse tile in elements */ - unsigned int height; /**< Height of sparse tile in elements */ - unsigned int depth; /**< Depth of sparse tile in elements */ - } tileExtent; - - /** - * First mip level at which the mip tail begins. - */ - unsigned int miptailFirstLevel; - /** - * Total size of the mip tail. 
- */ - unsigned long long miptailSize; - /** - * Flags will either be zero or ::CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL - */ - unsigned int flags; - unsigned int reserved[4]; -} CUDA_ARRAY_SPARSE_PROPERTIES_v1; -typedef CUDA_ARRAY_SPARSE_PROPERTIES_v1 CUDA_ARRAY_SPARSE_PROPERTIES; - -/** - * CUDA Resource descriptor - */ -typedef struct CUDA_RESOURCE_DESC_st -{ - CUresourcetype resType; /**< Resource type */ - - union { - struct { - CUarray hArray; /**< CUDA array */ - } array; - struct { - CUmipmappedArray hMipmappedArray; /**< CUDA mipmapped array */ - } mipmap; - struct { - CUdeviceptr devPtr; /**< Device pointer */ - CUarray_format format; /**< Array format */ - unsigned int numChannels; /**< Channels per array element */ - size_t sizeInBytes; /**< Size in bytes */ - } linear; - struct { - CUdeviceptr devPtr; /**< Device pointer */ - CUarray_format format; /**< Array format */ - unsigned int numChannels; /**< Channels per array element */ - size_t width; /**< Width of the array in elements */ - size_t height; /**< Height of the array in elements */ - size_t pitchInBytes; /**< Pitch between two rows in bytes */ - } pitch2D; - struct { - int reserved[32]; - } reserved; - } res; - - unsigned int flags; /**< Flags (must be zero) */ -} CUDA_RESOURCE_DESC_v1; -typedef CUDA_RESOURCE_DESC_v1 CUDA_RESOURCE_DESC; - -/** - * Texture descriptor - */ -typedef struct CUDA_TEXTURE_DESC_st { - CUaddress_mode addressMode[3]; /**< Address modes */ - CUfilter_mode filterMode; /**< Filter mode */ - unsigned int flags; /**< Flags */ - unsigned int maxAnisotropy; /**< Maximum anisotropy ratio */ - CUfilter_mode mipmapFilterMode; /**< Mipmap filter mode */ - float mipmapLevelBias; /**< Mipmap level bias */ - float minMipmapLevelClamp; /**< Mipmap minimum level clamp */ - float maxMipmapLevelClamp; /**< Mipmap maximum level clamp */ - float borderColor[4]; /**< Border Color */ - int reserved[12]; -} CUDA_TEXTURE_DESC_v1; -typedef CUDA_TEXTURE_DESC_v1 CUDA_TEXTURE_DESC; - -/** - * Resource view format - */ -typedef enum CUresourceViewFormat_enum -{ - CU_RES_VIEW_FORMAT_NONE = 0x00, /**< No resource view format (use underlying resource format) */ - CU_RES_VIEW_FORMAT_UINT_1X8 = 0x01, /**< 1 channel unsigned 8-bit integers */ - CU_RES_VIEW_FORMAT_UINT_2X8 = 0x02, /**< 2 channel unsigned 8-bit integers */ - CU_RES_VIEW_FORMAT_UINT_4X8 = 0x03, /**< 4 channel unsigned 8-bit integers */ - CU_RES_VIEW_FORMAT_SINT_1X8 = 0x04, /**< 1 channel signed 8-bit integers */ - CU_RES_VIEW_FORMAT_SINT_2X8 = 0x05, /**< 2 channel signed 8-bit integers */ - CU_RES_VIEW_FORMAT_SINT_4X8 = 0x06, /**< 4 channel signed 8-bit integers */ - CU_RES_VIEW_FORMAT_UINT_1X16 = 0x07, /**< 1 channel unsigned 16-bit integers */ - CU_RES_VIEW_FORMAT_UINT_2X16 = 0x08, /**< 2 channel unsigned 16-bit integers */ - CU_RES_VIEW_FORMAT_UINT_4X16 = 0x09, /**< 4 channel unsigned 16-bit integers */ - CU_RES_VIEW_FORMAT_SINT_1X16 = 0x0a, /**< 1 channel signed 16-bit integers */ - CU_RES_VIEW_FORMAT_SINT_2X16 = 0x0b, /**< 2 channel signed 16-bit integers */ - CU_RES_VIEW_FORMAT_SINT_4X16 = 0x0c, /**< 4 channel signed 16-bit integers */ - CU_RES_VIEW_FORMAT_UINT_1X32 = 0x0d, /**< 1 channel unsigned 32-bit integers */ - CU_RES_VIEW_FORMAT_UINT_2X32 = 0x0e, /**< 2 channel unsigned 32-bit integers */ - CU_RES_VIEW_FORMAT_UINT_4X32 = 0x0f, /**< 4 channel unsigned 32-bit integers */ - CU_RES_VIEW_FORMAT_SINT_1X32 = 0x10, /**< 1 channel signed 32-bit integers */ - CU_RES_VIEW_FORMAT_SINT_2X32 = 0x11, /**< 2 channel signed 32-bit integers */ - 
CU_RES_VIEW_FORMAT_SINT_4X32 = 0x12, /**< 4 channel signed 32-bit integers */ - CU_RES_VIEW_FORMAT_FLOAT_1X16 = 0x13, /**< 1 channel 16-bit floating point */ - CU_RES_VIEW_FORMAT_FLOAT_2X16 = 0x14, /**< 2 channel 16-bit floating point */ - CU_RES_VIEW_FORMAT_FLOAT_4X16 = 0x15, /**< 4 channel 16-bit floating point */ - CU_RES_VIEW_FORMAT_FLOAT_1X32 = 0x16, /**< 1 channel 32-bit floating point */ - CU_RES_VIEW_FORMAT_FLOAT_2X32 = 0x17, /**< 2 channel 32-bit floating point */ - CU_RES_VIEW_FORMAT_FLOAT_4X32 = 0x18, /**< 4 channel 32-bit floating point */ - CU_RES_VIEW_FORMAT_UNSIGNED_BC1 = 0x19, /**< Block compressed 1 */ - CU_RES_VIEW_FORMAT_UNSIGNED_BC2 = 0x1a, /**< Block compressed 2 */ - CU_RES_VIEW_FORMAT_UNSIGNED_BC3 = 0x1b, /**< Block compressed 3 */ - CU_RES_VIEW_FORMAT_UNSIGNED_BC4 = 0x1c, /**< Block compressed 4 unsigned */ - CU_RES_VIEW_FORMAT_SIGNED_BC4 = 0x1d, /**< Block compressed 4 signed */ - CU_RES_VIEW_FORMAT_UNSIGNED_BC5 = 0x1e, /**< Block compressed 5 unsigned */ - CU_RES_VIEW_FORMAT_SIGNED_BC5 = 0x1f, /**< Block compressed 5 signed */ - CU_RES_VIEW_FORMAT_UNSIGNED_BC6H = 0x20, /**< Block compressed 6 unsigned half-float */ - CU_RES_VIEW_FORMAT_SIGNED_BC6H = 0x21, /**< Block compressed 6 signed half-float */ - CU_RES_VIEW_FORMAT_UNSIGNED_BC7 = 0x22 /**< Block compressed 7 */ -} CUresourceViewFormat; - -/** - * Resource view descriptor - */ -typedef struct CUDA_RESOURCE_VIEW_DESC_st -{ - CUresourceViewFormat format; /**< Resource view format */ - size_t width; /**< Width of the resource view */ - size_t height; /**< Height of the resource view */ - size_t depth; /**< Depth of the resource view */ - unsigned int firstMipmapLevel; /**< First defined mipmap level */ - unsigned int lastMipmapLevel; /**< Last defined mipmap level */ - unsigned int firstLayer; /**< First layer index */ - unsigned int lastLayer; /**< Last layer index */ - unsigned int reserved[16]; -} CUDA_RESOURCE_VIEW_DESC_v1; -typedef CUDA_RESOURCE_VIEW_DESC_v1 CUDA_RESOURCE_VIEW_DESC; - -/** - * GPU Direct v3 tokens - */ -typedef struct CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_st { - unsigned long long p2pToken; - unsigned int vaSpaceToken; -} CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1; -typedef CUDA_POINTER_ATTRIBUTE_P2P_TOKENS_v1 CUDA_POINTER_ATTRIBUTE_P2P_TOKENS; - -/** -* Access flags that specify the level of access the current context's device has -* on the memory referenced. -*/ -typedef enum CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS_enum { - CU_POINTER_ATTRIBUTE_ACCESS_FLAG_NONE = 0x0, /**< No access, meaning the device cannot access this memory at all, thus must be staged through accessible memory in order to complete certain operations */ - CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READ = 0x1, /**< Read-only access, meaning writes to this memory are considered invalid accesses and thus return error in that case. 
*/ - CU_POINTER_ATTRIBUTE_ACCESS_FLAG_READWRITE = 0x3 /**< Read-write access, the device has full read-write access to the memory */ -} CUDA_POINTER_ATTRIBUTE_ACCESS_FLAGS; - -/** - * Kernel launch parameters - */ -typedef struct CUDA_LAUNCH_PARAMS_st { - CUfunction function; /**< Kernel to launch */ - unsigned int gridDimX; /**< Width of grid in blocks */ - unsigned int gridDimY; /**< Height of grid in blocks */ - unsigned int gridDimZ; /**< Depth of grid in blocks */ - unsigned int blockDimX; /**< X dimension of each thread block */ - unsigned int blockDimY; /**< Y dimension of each thread block */ - unsigned int blockDimZ; /**< Z dimension of each thread block */ - unsigned int sharedMemBytes; /**< Dynamic shared-memory size per thread block in bytes */ - CUstream hStream; /**< Stream identifier */ - void **kernelParams; /**< Array of pointers to kernel parameters */ -} CUDA_LAUNCH_PARAMS_v1; -typedef CUDA_LAUNCH_PARAMS_v1 CUDA_LAUNCH_PARAMS; - -/** - * External memory handle types - */ -typedef enum CUexternalMemoryHandleType_enum { - /** - * Handle is an opaque file descriptor - */ - CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD = 1, - /** - * Handle is an opaque shared NT handle - */ - CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 = 2, - /** - * Handle is an opaque, globally shared handle - */ - CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT = 3, - /** - * Handle is a D3D12 heap object - */ - CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP = 4, - /** - * Handle is a D3D12 committed resource - */ - CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE = 5, - /** - * Handle is a shared NT handle to a D3D11 resource - */ - CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE = 6, - /** - * Handle is a globally shared handle to a D3D11 resource - */ - CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT = 7, - /** - * Handle is an NvSciBuf object - */ - CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF = 8 -} CUexternalMemoryHandleType; - -/** - * Indicates that the external memory object is a dedicated resource - */ -#define CUDA_EXTERNAL_MEMORY_DEDICATED 0x1 - -/** When the \p flags parameter of ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS - * contains this flag, it indicates that signaling an external semaphore object - * should skip performing appropriate memory synchronization operations over all - * the external memory objects that are imported as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF, - * which otherwise are performed by default to ensure data coherency with other - * importers of the same NvSciBuf memory objects. - */ -#define CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC 0x01 - -/** When the \p flags parameter of ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS - * contains this flag, it indicates that waiting on an external semaphore object - * should skip performing appropriate memory synchronization operations over all - * the external memory objects that are imported as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF, - * which otherwise are performed by default to ensure data coherency with other - * importers of the same NvSciBuf memory objects. - */ -#define CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC 0x02 - -/** - * When \p flags of ::cuDeviceGetNvSciSyncAttributes is set to this, - * it indicates that application needs signaler specific NvSciSyncAttr - * to be filled by ::cuDeviceGetNvSciSyncAttributes. 
- */ -#define CUDA_NVSCISYNC_ATTR_SIGNAL 0x1 - -/** - * When \p flags of ::cuDeviceGetNvSciSyncAttributes is set to this, - * it indicates that application needs waiter specific NvSciSyncAttr - * to be filled by ::cuDeviceGetNvSciSyncAttributes. - */ -#define CUDA_NVSCISYNC_ATTR_WAIT 0x2 -/** - * External memory handle descriptor - */ -typedef struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st { - /** - * Type of the handle - */ - CUexternalMemoryHandleType type; - union { - /** - * File descriptor referencing the memory object. Valid - * when type is - * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD - */ - int fd; - /** - * Win32 handle referencing the semaphore object. Valid when - * type is one of the following: - * - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 - * - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT - * - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP - * - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE - * - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE - * - ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT - * Exactly one of 'handle' and 'name' must be non-NULL. If - * type is one of the following: - * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT - * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT - * then 'name' must be NULL. - */ - struct { - /** - * Valid NT handle. Must be NULL if 'name' is non-NULL - */ - void *handle; - /** - * Name of a valid memory object. - * Must be NULL if 'handle' is non-NULL. - */ - const void *name; - } win32; - /** - * A handle representing an NvSciBuf Object. Valid when type - * is ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF - */ - const void *nvSciBufObject; - } handle; - /** - * Size of the memory allocation - */ - unsigned long long size; - /** - * Flags must either be zero or ::CUDA_EXTERNAL_MEMORY_DEDICATED - */ - unsigned int flags; - unsigned int reserved[16]; -} CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1; -typedef CUDA_EXTERNAL_MEMORY_HANDLE_DESC_v1 CUDA_EXTERNAL_MEMORY_HANDLE_DESC; - -/** - * External memory buffer descriptor - */ -typedef struct CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st { - /** - * Offset into the memory object where the buffer's base is - */ - unsigned long long offset; - /** - * Size of the buffer - */ - unsigned long long size; - /** - * Flags reserved for future use. Must be zero. - */ - unsigned int flags; - unsigned int reserved[16]; -} CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1; -typedef CUDA_EXTERNAL_MEMORY_BUFFER_DESC_v1 CUDA_EXTERNAL_MEMORY_BUFFER_DESC; - -/** - * External memory mipmap descriptor - */ -typedef struct CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st { - /** - * Offset into the memory object where the base level of the - * mipmap chain is. 
- */ - unsigned long long offset; - /** - * Format, dimension and type of base level of the mipmap chain - */ - CUDA_ARRAY3D_DESCRIPTOR arrayDesc; - /** - * Total number of levels in the mipmap chain - */ - unsigned int numLevels; - unsigned int reserved[16]; -} CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1; -typedef CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_v1 CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC; - -/** - * External semaphore handle types - */ -typedef enum CUexternalSemaphoreHandleType_enum { - /** - * Handle is an opaque file descriptor - */ - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD = 1, - /** - * Handle is an opaque shared NT handle - */ - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 = 2, - /** - * Handle is an opaque, globally shared handle - */ - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT = 3, - /** - * Handle is a shared NT handle referencing a D3D12 fence object - */ - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE = 4, - /** - * Handle is a shared NT handle referencing a D3D11 fence object - */ - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE = 5, - /** - * Opaque handle to NvSciSync Object - */ - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC = 6, - /** - * Handle is a shared NT handle referencing a D3D11 keyed mutex object - */ - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX = 7, - /** - * Handle is a globally shared handle referencing a D3D11 keyed mutex object - */ - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT = 8, - /** - * Handle is an opaque file descriptor referencing a timeline semaphore - */ - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD = 9, - /** - * Handle is an opaque shared NT handle referencing a timeline semaphore - */ - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10 -} CUexternalSemaphoreHandleType; - -/** - * External semaphore handle descriptor - */ -typedef struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st { - /** - * Type of the handle - */ - CUexternalSemaphoreHandleType type; - union { - /** - * File descriptor referencing the semaphore object. Valid - * when type is one of the following: - * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD - * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD - */ - int fd; - /** - * Win32 handle referencing the semaphore object. Valid when - * type is one of the following: - * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 - * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT - * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE - * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE - * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX - * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 - * Exactly one of 'handle' and 'name' must be non-NULL. If - * type is one of the following: - * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT - * - ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT - * then 'name' must be NULL. - */ - struct { - /** - * Valid NT handle. Must be NULL if 'name' is non-NULL - */ - void *handle; - /** - * Name of a valid synchronization primitive. - * Must be NULL if 'handle' is non-NULL. - */ - const void *name; - } win32; - /** - * Valid NvSciSyncObj. Must be non NULL - */ - const void* nvSciSyncObj; - } handle; - /** - * Flags reserved for the future. Must be zero. 
- */ - unsigned int flags; - unsigned int reserved[16]; -} CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1; -typedef CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_v1 CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC; - -/** - * External semaphore signal parameters - */ -typedef struct CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_st { - struct { - /** - * Parameters for fence objects - */ - struct { - /** - * Value of fence to be signaled - */ - unsigned long long value; - } fence; - union { - /** - * Pointer to NvSciSyncFence. Valid if ::CUexternalSemaphoreHandleType - * is of type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC. - */ - void *fence; - unsigned long long reserved; - } nvSciSync; - /** - * Parameters for keyed mutex objects - */ - struct { - /** - * Value of key to release the mutex with - */ - unsigned long long key; - } keyedMutex; - unsigned int reserved[12]; - } params; - /** - * Only when ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS is used to - * signal a ::CUexternalSemaphore of type - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, the valid flag is - * ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC which indicates - * that while signaling the ::CUexternalSemaphore, no memory synchronization - * operations should be performed for any external memory object imported - * as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. - * For all other types of ::CUexternalSemaphore, flags must be zero. - */ - unsigned int flags; - unsigned int reserved[16]; -} CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1; -typedef CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS_v1 CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS; - -/** - * External semaphore wait parameters - */ -typedef struct CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_st { - struct { - /** - * Parameters for fence objects - */ - struct { - /** - * Value of fence to be waited on - */ - unsigned long long value; - } fence; - /** - * Pointer to NvSciSyncFence. Valid if CUexternalSemaphoreHandleType - * is of type CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC. - */ - union { - void *fence; - unsigned long long reserved; - } nvSciSync; - /** - * Parameters for keyed mutex objects - */ - struct { - /** - * Value of key to acquire the mutex with - */ - unsigned long long key; - /** - * Timeout in milliseconds to wait to acquire the mutex - */ - unsigned int timeoutMs; - } keyedMutex; - unsigned int reserved[10]; - } params; - /** - * Only when ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS is used to wait on - * a ::CUexternalSemaphore of type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, - * the valid flag is ::CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC - * which indicates that while waiting for the ::CUexternalSemaphore, no memory - * synchronization operations should be performed for any external memory - * object imported as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. - * For all other types of ::CUexternalSemaphore, flags must be zero. - */ - unsigned int flags; - unsigned int reserved[16]; -} CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1; -typedef CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS_v1 CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS; - -/** - * Semaphore signal node parameters - */ -typedef struct CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_st { - CUexternalSemaphore* extSemArray; /**< Array of external semaphore handles. */ - const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS* paramsArray; /**< Array of external semaphore signal parameters. */ - unsigned int numExtSems; /**< Number of handles and parameters supplied in extSemArray and paramsArray. 
*/ -} CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1; -typedef CUDA_EXT_SEM_SIGNAL_NODE_PARAMS_v1 CUDA_EXT_SEM_SIGNAL_NODE_PARAMS; - -/** - * Semaphore wait node parameters - */ -typedef struct CUDA_EXT_SEM_WAIT_NODE_PARAMS_st { - CUexternalSemaphore* extSemArray; /**< Array of external semaphore handles. */ - const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS* paramsArray; /**< Array of external semaphore wait parameters. */ - unsigned int numExtSems; /**< Number of handles and parameters supplied in extSemArray and paramsArray. */ -} CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1; -typedef CUDA_EXT_SEM_WAIT_NODE_PARAMS_v1 CUDA_EXT_SEM_WAIT_NODE_PARAMS; - -typedef unsigned long long CUmemGenericAllocationHandle_v1; -typedef CUmemGenericAllocationHandle_v1 CUmemGenericAllocationHandle; - -/** - * Flags for specifying particular handle types - */ -typedef enum CUmemAllocationHandleType_enum { - CU_MEM_HANDLE_TYPE_NONE = 0x0, /**< Does not allow any export mechanism. > */ - CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR = 0x1, /**< Allows a file descriptor to be used for exporting. Permitted only on POSIX systems. (int) */ - CU_MEM_HANDLE_TYPE_WIN32 = 0x2, /**< Allows a Win32 NT handle to be used for exporting. (HANDLE) */ - CU_MEM_HANDLE_TYPE_WIN32_KMT = 0x4, /**< Allows a Win32 KMT handle to be used for exporting. (D3DKMT_HANDLE) */ - CU_MEM_HANDLE_TYPE_MAX = 0x7FFFFFFF -} CUmemAllocationHandleType; - -/** - * Specifies the memory protection flags for mapping. - */ -typedef enum CUmemAccess_flags_enum { - CU_MEM_ACCESS_FLAGS_PROT_NONE = 0x0, /**< Default, make the address range not accessible */ - CU_MEM_ACCESS_FLAGS_PROT_READ = 0x1, /**< Make the address range read accessible */ - CU_MEM_ACCESS_FLAGS_PROT_READWRITE = 0x3, /**< Make the address range read-write accessible */ - CU_MEM_ACCESS_FLAGS_PROT_MAX = 0x7FFFFFFF -} CUmemAccess_flags; - -/** - * Specifies the type of location - */ -typedef enum CUmemLocationType_enum { - CU_MEM_LOCATION_TYPE_INVALID = 0x0, - CU_MEM_LOCATION_TYPE_DEVICE = 0x1, /**< Location is a device location, thus id is a device ordinal */ - CU_MEM_LOCATION_TYPE_MAX = 0x7FFFFFFF -} CUmemLocationType; - -/** -* Defines the allocation types available -*/ -typedef enum CUmemAllocationType_enum { - CU_MEM_ALLOCATION_TYPE_INVALID = 0x0, - - /** This allocation type is 'pinned', i.e. cannot migrate from its current - * location while the application is actively using it - */ - CU_MEM_ALLOCATION_TYPE_PINNED = 0x1, - CU_MEM_ALLOCATION_TYPE_MAX = 0x7FFFFFFF -} CUmemAllocationType; - -/** -* Flag for requesting different optimal and required granularities for an allocation. 
-*/ -typedef enum CUmemAllocationGranularity_flags_enum { - CU_MEM_ALLOC_GRANULARITY_MINIMUM = 0x0, /**< Minimum required granularity for allocation */ - CU_MEM_ALLOC_GRANULARITY_RECOMMENDED = 0x1 /**< Recommended granularity for allocation for best performance */ -} CUmemAllocationGranularity_flags; - -/** - * Sparse subresource types - */ -typedef enum CUarraySparseSubresourceType_enum { - CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL = 0, - CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL = 1 -} CUarraySparseSubresourceType; - -/** - * Memory operation types - */ -typedef enum CUmemOperationType_enum { - CU_MEM_OPERATION_TYPE_MAP = 1, - CU_MEM_OPERATION_TYPE_UNMAP = 2 -} CUmemOperationType; - -/** - * Memory handle types - */ -typedef enum CUmemHandleType_enum { - CU_MEM_HANDLE_TYPE_GENERIC = 0 -} CUmemHandleType; - -/** - * Specifies the CUDA array or CUDA mipmapped array memory mapping information - */ -typedef struct CUarrayMapInfo_st { - CUresourcetype resourceType; /**< Resource type */ - - union { - CUmipmappedArray mipmap; - CUarray array; - } resource; - - CUarraySparseSubresourceType subresourceType; /**< Sparse subresource type */ - - union { - struct { - unsigned int level; /**< For CUDA mipmapped arrays must a valid mipmap level. For CUDA arrays must be zero */ - unsigned int layer; /**< For CUDA layered arrays must be a valid layer index. Otherwise, must be zero */ - unsigned int offsetX; /**< Starting X offset in elements */ - unsigned int offsetY; /**< Starting Y offset in elements */ - unsigned int offsetZ; /**< Starting Z offset in elements */ - unsigned int extentWidth; /**< Width in elements */ - unsigned int extentHeight; /**< Height in elements */ - unsigned int extentDepth; /**< Depth in elements */ - } sparseLevel; - struct { - unsigned int layer; /**< For CUDA layered arrays must be a valid layer index. Otherwise, must be zero */ - unsigned long long offset; /**< Offset within mip tail */ - unsigned long long size; /**< Extent in bytes */ - } miptail; - } subresource; - - CUmemOperationType memOperationType; /**< Memory operation type */ - CUmemHandleType memHandleType; /**< Memory handle type */ - - union { - CUmemGenericAllocationHandle memHandle; - } memHandle; - - unsigned long long offset; /**< Offset within the memory */ - unsigned int deviceBitMask; /**< Device ordinal bit mask */ - unsigned int flags; /**< flags for future use, must be zero now. */ - unsigned int reserved[2]; /**< Reserved for future use, must be zero now. */ -} CUarrayMapInfo_v1; -typedef CUarrayMapInfo_v1 CUarrayMapInfo; - -/** - * Specifies a memory location. - */ -typedef struct CUmemLocation_st { - CUmemLocationType type; /**< Specifies the location type, which modifies the meaning of id. */ - int id; /**< identifier for a given this location's ::CUmemLocationType. */ -} CUmemLocation_v1; -typedef CUmemLocation_v1 CUmemLocation; - -/** - * Specifies compression attribute for an allocation. - */ -typedef enum CUmemAllocationCompType_enum { - CU_MEM_ALLOCATION_COMP_NONE = 0x0, /**< Allocating non-compressible memory */ - CU_MEM_ALLOCATION_COMP_GENERIC = 0x1 /**< Allocating compressible memory */ -} CUmemAllocationCompType; - -/** - * This flag if set indicates that the memory will be used as a tile pool. - */ -#define CU_MEM_CREATE_USAGE_TILE_POOL 0x1 - -/** -* Specifies the allocation properties for a allocation. 
-*/ -typedef struct CUmemAllocationProp_st { - /** Allocation type */ - CUmemAllocationType type; - /** requested ::CUmemAllocationHandleType */ - CUmemAllocationHandleType requestedHandleTypes; - /** Location of allocation */ - CUmemLocation location; - /** - * Windows-specific POBJECT_ATTRIBUTES required when - * ::CU_MEM_HANDLE_TYPE_WIN32 is specified. This object attributes structure - * includes security attributes that define - * the scope of which exported allocations may be transferred to other - * processes. In all other cases, this field is required to be zero. - */ - void *win32HandleMetaData; - struct { - /** - * Allocation hint for requesting compressible memory. - * On devices that support Compute Data Compression, compressible - * memory can be used to accelerate accesses to data with unstructured - * sparsity and other compressible data patterns. Applications are - * expected to query allocation property of the handle obtained with - * ::cuMemCreate using ::cuMemGetAllocationPropertiesFromHandle to - * validate if the obtained allocation is compressible or not. Note that - * compressed memory may not be mappable on all devices. - */ - unsigned char compressionType; - unsigned char gpuDirectRDMACapable; - /** Bitmask indicating intended usage for this allocation */ - unsigned short usage; - unsigned char reserved[4]; - } allocFlags; -} CUmemAllocationProp_v1; -typedef CUmemAllocationProp_v1 CUmemAllocationProp; - -/** - * Memory access descriptor - */ -typedef struct CUmemAccessDesc_st { - CUmemLocation location; /**< Location on which the request is to change it's accessibility */ - CUmemAccess_flags flags; /**< ::CUmemProt accessibility flags to set on the request */ -} CUmemAccessDesc_v1; -typedef CUmemAccessDesc_v1 CUmemAccessDesc; - -typedef enum CUgraphExecUpdateResult_enum { - CU_GRAPH_EXEC_UPDATE_SUCCESS = 0x0, /**< The update succeeded */ - CU_GRAPH_EXEC_UPDATE_ERROR = 0x1, /**< The update failed for an unexpected reason which is described in the return value of the function */ - CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED = 0x2, /**< The update failed because the topology changed */ - CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED = 0x3, /**< The update failed because a node type changed */ - CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED = 0x4, /**< The update failed because the function of a kernel node changed (CUDA driver < 11.2) */ - CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED = 0x5, /**< The update failed because the parameters changed in a way that is not supported */ - CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED = 0x6, /**< The update failed because something about the node is not supported */ - CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE = 0x7 /**< The update failed because the function of a kernel node changed in an unsupported way */ -} CUgraphExecUpdateResult; - -/** - * CUDA memory pool attributes - */ -typedef enum CUmemPool_attribute_enum { - /** - * (value type = int) - * Allow cuMemAllocAsync to use memory asynchronously freed - * in another streams as long as a stream ordering dependency - * of the allocating stream on the free action exists. - * Cuda events and null stream interactions can create the required - * stream ordered dependencies. (default enabled) - */ - CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES = 1, - - /** - * (value type = int) - * Allow reuse of already completed frees when there is no dependency - * between the free and allocation. 
(default enabled) - */ - CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC, - - /** - * (value type = int) - * Allow cuMemAllocAsync to insert new stream dependencies - * in order to establish the stream ordering required to reuse - * a piece of memory released by cuFreeAsync (default enabled). - */ - CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES, - - /** - * (value type = cuuint64_t) - * Amount of reserved memory in bytes to hold onto before trying - * to release memory back to the OS. When more than the release - * threshold bytes of memory are held by the memory pool, the - * allocator will try to release memory back to the OS on the - * next call to stream, event or context synchronize. (default 0) - */ - CU_MEMPOOL_ATTR_RELEASE_THRESHOLD, - - /** - * (value type = cuuint64_t) - * Amount of backing memory currently allocated for the mempool. - */ - CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT, - - /** - * (value type = cuuint64_t) - * High watermark of backing memory allocated for the mempool since the - * last time it was reset. High watermark can only be reset to zero. - */ - CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH, - - /** - * (value type = cuuint64_t) - * Amount of memory from the pool that is currently in use by the application. - */ - CU_MEMPOOL_ATTR_USED_MEM_CURRENT, - - /** - * (value type = cuuint64_t) - * High watermark of the amount of memory from the pool that was in use by the application since - * the last time it was reset. High watermark can only be reset to zero. - */ - CU_MEMPOOL_ATTR_USED_MEM_HIGH -} CUmemPool_attribute; - -/** - * Specifies the properties of allocations made from the pool. - */ -typedef struct CUmemPoolProps_st { - CUmemAllocationType allocType; /**< Allocation type. Currently must be specified as CU_MEM_ALLOCATION_TYPE_PINNED */ - CUmemAllocationHandleType handleTypes; /**< Handle types that will be supported by allocations from the pool. */ - CUmemLocation location; /**< Location where allocations should reside. */ - /** - * Windows-specific LPSECURITYATTRIBUTES required when - * ::CU_MEM_HANDLE_TYPE_WIN32 is specified. This security attribute defines - * the scope of which exported allocations may be transferred to other - * processes. In all other cases, this field is required to be zero. - */ - void *win32SecurityAttributes; - unsigned char reserved[64]; /**< reserved for future use, must be 0 */ -} CUmemPoolProps_v1; -typedef CUmemPoolProps_v1 CUmemPoolProps; - -/** - * Opaque data for exporting a pool allocation - */ -typedef struct CUmemPoolPtrExportData_st { - unsigned char reserved[64]; -} CUmemPoolPtrExportData_v1; -typedef CUmemPoolPtrExportData_v1 CUmemPoolPtrExportData; - -/** - * Memory allocation node parameters - */ -typedef struct CUDA_MEM_ALLOC_NODE_PARAMS_st { - /** - * in: location where the allocation should reside (specified in ::location). - * ::handleTypes must be ::CU_MEM_HANDLE_TYPE_NONE. IPC is not supported. - */ - CUmemPoolProps poolProps; - const CUmemAccessDesc *accessDescs; /**< in: array of memory access descriptors. Used to describe peer GPU access */ - size_t accessDescCount; /**< in: number of memory access descriptors. Must not exceed the number of GPUs. 
*/ - size_t bytesize; /**< in: size in bytes of the requested allocation */ - CUdeviceptr dptr; /**< out: address of the allocation returned by CUDA */ -} CUDA_MEM_ALLOC_NODE_PARAMS; - -typedef enum CUgraphMem_attribute_enum { - /** - * (value type = cuuint64_t) - * Amount of memory, in bytes, currently associated with graphs - */ - CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT, - - /** - * (value type = cuuint64_t) - * High watermark of memory, in bytes, associated with graphs since the - * last time it was reset. High watermark can only be reset to zero. - */ - CU_GRAPH_MEM_ATTR_USED_MEM_HIGH, - - /** - * (value type = cuuint64_t) - * Amount of memory, in bytes, currently allocated for use by - * the CUDA graphs asynchronous allocator. - */ - CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT, - - /** - * (value type = cuuint64_t) - * High watermark of memory, in bytes, currently allocated for use by - * the CUDA graphs asynchronous allocator. - */ - CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH -} CUgraphMem_attribute; - -/** - * If set, each kernel launched as part of ::cuLaunchCooperativeKernelMultiDevice only - * waits for prior work in the stream corresponding to that GPU to complete before the - * kernel begins execution. - */ -#define CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC 0x01 - -/** - * If set, any subsequent work pushed in a stream that participated in a call to - * ::cuLaunchCooperativeKernelMultiDevice will only wait for the kernel launched on - * the GPU corresponding to that stream to complete before it begins execution. - */ -#define CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC 0x02 - -/** - * If set, the CUDA array is a collection of layers, where each layer is either a 1D - * or a 2D array and the Depth member of CUDA_ARRAY3D_DESCRIPTOR specifies the number - * of layers, not the depth of a 3D array. - */ -#define CUDA_ARRAY3D_LAYERED 0x01 - -/** - * Deprecated, use CUDA_ARRAY3D_LAYERED - */ -#define CUDA_ARRAY3D_2DARRAY 0x01 - -/** - * This flag must be set in order to bind a surface reference - * to the CUDA array - */ -#define CUDA_ARRAY3D_SURFACE_LDST 0x02 - -/** - * If set, the CUDA array is a collection of six 2D arrays, representing faces of a cube. The - * width of such a CUDA array must be equal to its height, and Depth must be six. - * If ::CUDA_ARRAY3D_LAYERED flag is also set, then the CUDA array is a collection of cubemaps - * and Depth must be a multiple of six. - */ -#define CUDA_ARRAY3D_CUBEMAP 0x04 - -/** - * This flag must be set in order to perform texture gather operations - * on a CUDA array. - */ -#define CUDA_ARRAY3D_TEXTURE_GATHER 0x08 - -/** - * This flag if set indicates that the CUDA - * array is a DEPTH_TEXTURE. - */ -#define CUDA_ARRAY3D_DEPTH_TEXTURE 0x10 - -/** - * This flag indicates that the CUDA array may be bound as a color target - * in an external graphics API - */ -#define CUDA_ARRAY3D_COLOR_ATTACHMENT 0x20 - -/** - * This flag if set indicates that the CUDA array or CUDA mipmapped array - * is a sparse CUDA array or CUDA mipmapped array respectively - */ -#define CUDA_ARRAY3D_SPARSE 0x40 - -/** - * Override the texref format with a format inferred from the array. - * Flag for ::cuTexRefSetArray() - */ -#define CU_TRSA_OVERRIDE_FORMAT 0x01 - -/** - * Read the texture as integers rather than promoting the values to floats - * in the range [0,1]. - * Flag for ::cuTexRefSetFlags() and ::cuTexObjectCreate() - */ -#define CU_TRSF_READ_AS_INTEGER 0x01 - -/** - * Use normalized texture coordinates in the range [0,1) instead of [0,dim). 
- * Flag for ::cuTexRefSetFlags() and ::cuTexObjectCreate() - */ -#define CU_TRSF_NORMALIZED_COORDINATES 0x02 - -/** - * Perform sRGB->linear conversion during texture read. - * Flag for ::cuTexRefSetFlags() and ::cuTexObjectCreate() - */ -#define CU_TRSF_SRGB 0x10 - - /** - * Disable any trilinear filtering optimizations. - * Flag for ::cuTexRefSetFlags() and ::cuTexObjectCreate() - */ -#define CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION 0x20 - -/** - * End of array terminator for the \p extra parameter to - * ::cuLaunchKernel - */ -#define CU_LAUNCH_PARAM_END ((void*)0x00) - -/** - * Indicator that the next value in the \p extra parameter to - * ::cuLaunchKernel will be a pointer to a buffer containing all kernel - * parameters used for launching kernel \p f. This buffer needs to - * honor all alignment/padding requirements of the individual parameters. - * If ::CU_LAUNCH_PARAM_BUFFER_SIZE is not also specified in the - * \p extra array, then ::CU_LAUNCH_PARAM_BUFFER_POINTER will have no - * effect. - */ -#define CU_LAUNCH_PARAM_BUFFER_POINTER ((void*)0x01) - -/** - * Indicator that the next value in the \p extra parameter to - * ::cuLaunchKernel will be a pointer to a size_t which contains the - * size of the buffer specified with ::CU_LAUNCH_PARAM_BUFFER_POINTER. - * It is required that ::CU_LAUNCH_PARAM_BUFFER_POINTER also be specified - * in the \p extra array if the value associated with - * ::CU_LAUNCH_PARAM_BUFFER_SIZE is not zero. - */ -#define CU_LAUNCH_PARAM_BUFFER_SIZE ((void*)0x02) - -/** - * For texture references loaded into the module, use default texunit from - * texture reference. - */ -#define CU_PARAM_TR_DEFAULT -1 - -/** - * Device that represents the CPU - */ -#define CU_DEVICE_CPU ((CUdevice)-1) - -/** - * Device that represents an invalid device - */ -#define CU_DEVICE_INVALID ((CUdevice)-2) - -/** - * Bitmasks for ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_FLUSH_WRITES_OPTIONS - */ -typedef enum CUflushGPUDirectRDMAWritesOptions_enum { - CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_HOST = 1<<0, /**< ::cuFlushGPUDirectRDMAWrites() and its CUDA Runtime API counterpart are supported on the device. */ - CU_FLUSH_GPU_DIRECT_RDMA_WRITES_OPTION_MEMOPS = 1<<1 /**< The ::CU_STREAM_WAIT_VALUE_FLUSH flag and the ::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES MemOp are supported on the device. */ -} CUflushGPUDirectRDMAWritesOptions; - -/** - * Platform native ordering for GPUDirect RDMA writes - */ -typedef enum CUGPUDirectRDMAWritesOrdering_enum { - CU_GPU_DIRECT_RDMA_WRITES_ORDERING_NONE = 0, /**< The device does not natively support ordering of remote writes. ::cuFlushGPUDirectRDMAWrites() can be leveraged if supported. */ - CU_GPU_DIRECT_RDMA_WRITES_ORDERING_OWNER = 100, /**< Natively, the device can consistently consume remote writes, although other CUDA devices may not. */ - CU_GPU_DIRECT_RDMA_WRITES_ORDERING_ALL_DEVICES = 200 /**< Any CUDA device in the system can consistently consume remote writes to this device. */ -} CUGPUDirectRDMAWritesOrdering; - -/** - * The scopes for ::cuFlushGPUDirectRDMAWrites - */ -typedef enum CUflushGPUDirectRDMAWritesScope_enum { - CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_OWNER = 100, /**< Blocks until remote writes are visible to the CUDA device context owning the data. */ - CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TO_ALL_DEVICES = 200 /**< Blocks until remote writes are visible to all CUDA device contexts. 
*/ -} CUflushGPUDirectRDMAWritesScope; - -/** - * The targets for ::cuFlushGPUDirectRDMAWrites - */ -typedef enum CUflushGPUDirectRDMAWritesTarget_enum { - CU_FLUSH_GPU_DIRECT_RDMA_WRITES_TARGET_CURRENT_CTX = 0 /**< Sets the target for ::cuFlushGPUDirectRDMAWrites() to the currently active CUDA device context. */ -} CUflushGPUDirectRDMAWritesTarget; - -/** - * The additional write options for ::cuGraphDebugDotPrint - */ -typedef enum CUgraphDebugDot_flags_enum { - CU_GRAPH_DEBUG_DOT_FLAGS_VERBOSE = 1<<0, /** Output all debug data as if every debug flag is enabled */ - CU_GRAPH_DEBUG_DOT_FLAGS_RUNTIME_TYPES = 1<<1, /** Use CUDA Runtime structures for output */ - CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_PARAMS = 1<<2, /** Adds CUDA_KERNEL_NODE_PARAMS values to output */ - CU_GRAPH_DEBUG_DOT_FLAGS_MEMCPY_NODE_PARAMS = 1<<3, /** Adds CUDA_MEMCPY3D values to output */ - CU_GRAPH_DEBUG_DOT_FLAGS_MEMSET_NODE_PARAMS = 1<<4, /** Adds CUDA_MEMSET_NODE_PARAMS values to output */ - CU_GRAPH_DEBUG_DOT_FLAGS_HOST_NODE_PARAMS = 1<<5, /** Adds CUDA_HOST_NODE_PARAMS values to output */ - CU_GRAPH_DEBUG_DOT_FLAGS_EVENT_NODE_PARAMS = 1<<6, /** Adds CUevent handle from record and wait nodes to output */ - CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_SIGNAL_NODE_PARAMS = 1<<7, /** Adds CUDA_EXT_SEM_SIGNAL_NODE_PARAMS values to output */ - CU_GRAPH_DEBUG_DOT_FLAGS_EXT_SEMAS_WAIT_NODE_PARAMS = 1<<8, /** Adds CUDA_EXT_SEM_WAIT_NODE_PARAMS values to output */ - CU_GRAPH_DEBUG_DOT_FLAGS_KERNEL_NODE_ATTRIBUTES = 1<<9, /** Adds CUkernelNodeAttrValue values to output */ - CU_GRAPH_DEBUG_DOT_FLAGS_HANDLES = 1<<10, /** Adds node handles and every kernel function handle to output */ - CU_GRAPH_DEBUG_DOT_FLAGS_MEM_ALLOC_NODE_PARAMS = 1<<11, /** Adds memory alloc node parameters to output */ - CU_GRAPH_DEBUG_DOT_FLAGS_MEM_FREE_NODE_PARAMS = 1<<12 /** Adds memory free node parameters to output */ -} CUgraphDebugDot_flags; - -/** - * Flags for user objects for graphs - */ -typedef enum CUuserObject_flags_enum { - CU_USER_OBJECT_NO_DESTRUCTOR_SYNC = 1 /**< Indicates the destructor execution is not synchronized by any CUDA handle. */ -} CUuserObject_flags; - -/** - * Flags for retaining user object references for graphs - */ -typedef enum CUuserObjectRetain_flags_enum { - CU_GRAPH_USER_OBJECT_MOVE = 1 /**< Transfer references from the caller rather than creating new references. */ -} CUuserObjectRetain_flags; - -/** - * Flags for instantiating a graph - */ -typedef enum CUgraphInstantiate_flags_enum { - CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH = 1 /**< Automatically free memory allocated in a graph before relaunching. */ -} CUgraphInstantiate_flags; - -/** @} */ /* END CUDA_TYPES */ - -#if defined(__GNUC__) - #if defined(__CUDA_API_PUSH_VISIBILITY_DEFAULT) - #pragma GCC visibility push(default) - #endif -#endif - -#ifdef _WIN32 -#define CUDAAPI __stdcall -#else -#define CUDAAPI -#endif - -/** - * \defgroup CUDA_ERROR Error Handling - * - * ___MANBRIEF___ error handling functions of the low-level CUDA driver API - * (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the error handling functions of the low-level CUDA - * driver application programming interface. - * - * @{ - */ - -/** - * \brief Gets the string description of an error code - * - * Sets \p *pStr to the address of a NULL-terminated string description - * of the error code \p error. - * If the error code is not recognized, ::CUDA_ERROR_INVALID_VALUE - * will be returned and \p *pStr will be set to the NULL address. 
- * - * \param error - Error code to convert to string - * \param pStr - Address of the string pointer. - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa - * ::CUresult, - * ::cudaGetErrorString - */ -CUresult CUDAAPI cuGetErrorString(CUresult error, const char **pStr); - -/** - * \brief Gets the string representation of an error code enum name - * - * Sets \p *pStr to the address of a NULL-terminated string representation - * of the name of the enum error code \p error. - * If the error code is not recognized, ::CUDA_ERROR_INVALID_VALUE - * will be returned and \p *pStr will be set to the NULL address. - * - * \param error - Error code to convert to string - * \param pStr - Address of the string pointer. - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa - * ::CUresult, - * ::cudaGetErrorName - */ -CUresult CUDAAPI cuGetErrorName(CUresult error, const char **pStr); - -/** @} */ /* END CUDA_ERROR */ - -/** - * \defgroup CUDA_INITIALIZE Initialization - * - * ___MANBRIEF___ initialization functions of the low-level CUDA driver API - * (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the initialization functions of the low-level CUDA - * driver application programming interface. - * - * @{ - */ - -/** - * \brief Initialize the CUDA driver API - * - * Initializes the driver API and must be called before any other function from - * the driver API. Currently, the \p Flags parameter must be 0. If ::cuInit() - * has not been called, any function from the driver API will return - * ::CUDA_ERROR_NOT_INITIALIZED. - * - * \param Flags - Initialization flag for CUDA. - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_DEVICE, - * ::CUDA_ERROR_SYSTEM_DRIVER_MISMATCH, - * ::CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE - * \notefnerr - */ -CUresult CUDAAPI cuInit(unsigned int Flags); - -/** @} */ /* END CUDA_INITIALIZE */ - -/** - * \defgroup CUDA_VERSION Version Management - * - * ___MANBRIEF___ version management functions of the low-level CUDA driver - * API (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the version management functions of the low-level - * CUDA driver application programming interface. - * - * @{ - */ - -/** - * \brief Returns the latest CUDA version supported by driver - * - * Returns in \p *driverVersion the version of CUDA supported by - * the driver. The version is returned as - * (1000 × major + 10 × minor). For example, CUDA 9.2 - * would be represented by 9020. - * - * This function automatically returns ::CUDA_ERROR_INVALID_VALUE if - * \p driverVersion is NULL. - * - * \param driverVersion - Returns the CUDA driver version - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa - * ::cudaDriverGetVersion, - * ::cudaRuntimeGetVersion - */ -CUresult CUDAAPI cuDriverGetVersion(int *driverVersion); - -/** @} */ /* END CUDA_VERSION */ - -/** - * \defgroup CUDA_DEVICE Device Management - * - * ___MANBRIEF___ device management functions of the low-level CUDA driver API - * (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the device management functions of the low-level - * CUDA driver application programming interface. - * - * @{ - */ - -/** - * \brief Returns a handle to a compute device - * - * Returns in \p *device a device handle given an ordinal in the range [0, - * ::cuDeviceGetCount()-1]. 
- * - * \param device - Returned device handle - * \param ordinal - Device number to get handle for - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_DEVICE - * \notefnerr - * - * \sa - * ::cuDeviceGetAttribute, - * ::cuDeviceGetCount, - * ::cuDeviceGetName, - * ::cuDeviceGetUuid, - * ::cuDeviceGetLuid, - * ::cuDeviceTotalMem, - * ::cuDeviceGetExecAffinitySupport - */ -CUresult CUDAAPI cuDeviceGet(CUdevice *device, int ordinal); - -/** - * \brief Returns the number of compute-capable devices - * - * Returns in \p *count the number of devices with compute capability greater - * than or equal to 2.0 that are available for execution. If there is no such - * device, ::cuDeviceGetCount() returns 0. - * - * \param count - Returned number of compute-capable devices - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa - * ::cuDeviceGetAttribute, - * ::cuDeviceGetName, - * ::cuDeviceGetUuid, - * ::cuDeviceGetLuid, - * ::cuDeviceGet, - * ::cuDeviceTotalMem, - * ::cuDeviceGetExecAffinitySupport, - * ::cudaGetDeviceCount - */ -CUresult CUDAAPI cuDeviceGetCount(int *count); - -/** - * \brief Returns an identifier string for the device - * - * Returns an ASCII string identifying the device \p dev in the NULL-terminated - * string pointed to by \p name. \p len specifies the maximum length of the - * string that may be returned. - * - * \param name - Returned identifier string for the device - * \param len - Maximum length of string to store in \p name - * \param dev - Device to get identifier string for - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_DEVICE - * \notefnerr - * - * \sa - * ::cuDeviceGetAttribute, - * ::cuDeviceGetUuid, - * ::cuDeviceGetLuid, - * ::cuDeviceGetCount, - * ::cuDeviceGet, - * ::cuDeviceTotalMem, - * ::cuDeviceGetExecAffinitySupport, - * ::cudaGetDeviceProperties - */ -CUresult CUDAAPI cuDeviceGetName(char *name, int len, CUdevice dev); - -/** - * \brief Return an UUID for the device - * - * Note there is a later version of this API, ::cuDeviceGetUuid_v2. It will - * supplant this version in 12.0, which is retained for minor version compatibility. - * - * Returns 16-octets identifying the device \p dev in the structure - * pointed by the \p uuid. - * - * \param uuid - Returned UUID - * \param dev - Device to get identifier string for - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_DEVICE - * \notefnerr - * - * \sa - * ::cuDeviceGetUuid_v2 - * ::cuDeviceGetAttribute, - * ::cuDeviceGetCount, - * ::cuDeviceGetName, - * ::cuDeviceGetLuid, - * ::cuDeviceGet, - * ::cuDeviceTotalMem, - * ::cuDeviceGetExecAffinitySupport, - * ::cudaGetDeviceProperties - */ -CUresult CUDAAPI cuDeviceGetUuid(CUuuid *uuid, CUdevice dev); - -/** - * \brief Return an UUID for the device (11.4+) - * - * Returns 16-octets identifying the device \p dev in the structure - * pointed by the \p uuid. If the device is in MIG mode, returns its - * MIG UUID which uniquely identifies the subscribed MIG compute instance. 
- * - * \param uuid - Returned UUID - * \param dev - Device to get identifier string for - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_DEVICE - * \notefnerr - * - * \sa - * ::cuDeviceGetAttribute, - * ::cuDeviceGetCount, - * ::cuDeviceGetName, - * ::cuDeviceGetLuid, - * ::cuDeviceGet, - * ::cuDeviceTotalMem, - * ::cudaGetDeviceProperties - */ -CUresult CUDAAPI cuDeviceGetUuid_v2(CUuuid *uuid, CUdevice dev); - -/** - * \brief Return an LUID and device node mask for the device - * - * Return identifying information (\p luid and \p deviceNodeMask) to allow - * matching device with graphics APIs. - * - * \param luid - Returned LUID - * \param deviceNodeMask - Returned device node mask - * \param dev - Device to get identifier string for - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_DEVICE - * \notefnerr - * - * \sa - * ::cuDeviceGetAttribute, - * ::cuDeviceGetCount, - * ::cuDeviceGetName, - * ::cuDeviceGet, - * ::cuDeviceTotalMem, - * ::cuDeviceGetExecAffinitySupport, - * ::cudaGetDeviceProperties - */ -CUresult CUDAAPI cuDeviceGetLuid(char *luid, unsigned int *deviceNodeMask, CUdevice dev); - -/** - * \brief Returns the total amount of memory on the device - * - * Returns in \p *bytes the total amount of memory available on the device - * \p dev in bytes. - * - * \param bytes - Returned memory available on device in bytes - * \param dev - Device handle - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_DEVICE - * \notefnerr - * - * \sa - * ::cuDeviceGetAttribute, - * ::cuDeviceGetCount, - * ::cuDeviceGetName, - * ::cuDeviceGetUuid, - * ::cuDeviceGet, - * ::cuDeviceGetExecAffinitySupport, - * ::cudaMemGetInfo - */ -CUresult CUDAAPI cuDeviceTotalMem(size_t *bytes, CUdevice dev); - -/** - * \brief Returns the maximum number of elements allocatable in a 1D linear texture for a given texture element size. - * - * Returns in \p maxWidthInElements the maximum number of texture elements allocatable in a 1D linear texture - * for given \p format and \p numChannels. - * - * \param maxWidthInElements - Returned maximum number of texture elements allocatable for given \p format and \p numChannels. - * \param format - Texture format. - * \param numChannels - Number of channels per texture element. - * \param dev - Device handle. - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_DEVICE - * \notefnerr - * - * \sa - * ::cuDeviceGetAttribute, - * ::cuDeviceGetCount, - * ::cuDeviceGetName, - * ::cuDeviceGetUuid, - * ::cuDeviceGet, - * ::cudaMemGetInfo, - * ::cuDeviceTotalMem - */ -CUresult CUDAAPI cuDeviceGetTexture1DLinearMaxWidth(size_t *maxWidthInElements, CUarray_format format, unsigned numChannels, CUdevice dev); - -/** - * \brief Returns information about the device - * - * Returns in \p *pi the integer value of the attribute \p attrib on device - * \p dev. 
The supported attributes are: - * - ::CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK: Maximum number of threads per - * block; - * - ::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X: Maximum x-dimension of a block; - * - ::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y: Maximum y-dimension of a block; - * - ::CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z: Maximum z-dimension of a block; - * - ::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X: Maximum x-dimension of a grid; - * - ::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y: Maximum y-dimension of a grid; - * - ::CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z: Maximum z-dimension of a grid; - * - ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK: Maximum amount of - * shared memory available to a thread block in bytes; - * - ::CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY: Memory available on device for - * __constant__ variables in a CUDA C kernel in bytes; - * - ::CU_DEVICE_ATTRIBUTE_WARP_SIZE: Warp size in threads; - * - ::CU_DEVICE_ATTRIBUTE_MAX_PITCH: Maximum pitch in bytes allowed by the - * memory copy functions that involve memory regions allocated through - * ::cuMemAllocPitch(); - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH: Maximum 1D - * texture width; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH: Maximum width - * for a 1D texture bound to linear memory; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH: Maximum - * mipmapped 1D texture width; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH: Maximum 2D - * texture width; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT: Maximum 2D - * texture height; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH: Maximum width - * for a 2D texture bound to linear memory; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT: Maximum height - * for a 2D texture bound to linear memory; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH: Maximum pitch - * in bytes for a 2D texture bound to linear memory; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH: Maximum - * mipmapped 2D texture width; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT: Maximum - * mipmapped 2D texture height; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH: Maximum 3D - * texture width; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT: Maximum 3D - * texture height; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH: Maximum 3D - * texture depth; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE: - * Alternate maximum 3D texture width, 0 if no alternate - * maximum 3D texture size is supported; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE: - * Alternate maximum 3D texture height, 0 if no alternate - * maximum 3D texture size is supported; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE: - * Alternate maximum 3D texture depth, 0 if no alternate - * maximum 3D texture size is supported; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH: - * Maximum cubemap texture width or height; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH: - * Maximum 1D layered texture width; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS: - * Maximum layers in a 1D layered texture; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH: - * Maximum 2D layered texture width; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT: - * Maximum 2D layered texture height; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS: - * Maximum layers in a 2D layered texture; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH: - 
* Maximum cubemap layered texture width or height; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS: - * Maximum layers in a cubemap layered texture; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH: - * Maximum 1D surface width; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH: - * Maximum 2D surface width; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT: - * Maximum 2D surface height; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH: - * Maximum 3D surface width; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT: - * Maximum 3D surface height; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH: - * Maximum 3D surface depth; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH: - * Maximum 1D layered surface width; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS: - * Maximum layers in a 1D layered surface; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH: - * Maximum 2D layered surface width; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT: - * Maximum 2D layered surface height; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS: - * Maximum layers in a 2D layered surface; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH: - * Maximum cubemap surface width; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH: - * Maximum cubemap layered surface width; - * - ::CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS: - * Maximum layers in a cubemap layered surface; - * - ::CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK: Maximum number of 32-bit - * registers available to a thread block; - * - ::CU_DEVICE_ATTRIBUTE_CLOCK_RATE: The typical clock frequency in kilohertz; - * - ::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT: Alignment requirement; texture - * base addresses aligned to ::textureAlign bytes do not need an offset - * applied to texture fetches; - * - ::CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT: Pitch alignment requirement - * for 2D texture references bound to pitched memory; - * - ::CU_DEVICE_ATTRIBUTE_GPU_OVERLAP: 1 if the device can concurrently copy - * memory between host and device while executing a kernel, or 0 if not; - * - ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT: Number of multiprocessors on - * the device; - * - ::CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT: 1 if there is a run time limit - * for kernels executed on the device, or 0 if not; - * - ::CU_DEVICE_ATTRIBUTE_INTEGRATED: 1 if the device is integrated with the - * memory subsystem, or 0 if not; - * - ::CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY: 1 if the device can map host - * memory into the CUDA address space, or 0 if not; - * - ::CU_DEVICE_ATTRIBUTE_COMPUTE_MODE: Compute mode that device is currently - * in. Available modes are as follows: - * - ::CU_COMPUTEMODE_DEFAULT: Default mode - Device is not restricted and - * can have multiple CUDA contexts present at a single time. - * - ::CU_COMPUTEMODE_PROHIBITED: Compute-prohibited mode - Device is - * prohibited from creating new CUDA contexts. - * - ::CU_COMPUTEMODE_EXCLUSIVE_PROCESS: Compute-exclusive-process mode - Device - * can have only one context used by a single process at a time. - * - ::CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS: 1 if the device supports - * executing multiple kernels within the same context simultaneously, or 0 if - * not. 
It is not guaranteed that multiple kernels will be resident - * on the device concurrently so this feature should not be relied upon for - * correctness; - * - ::CU_DEVICE_ATTRIBUTE_ECC_ENABLED: 1 if error correction is enabled on the - * device, 0 if error correction is disabled or not supported by the device; - * - ::CU_DEVICE_ATTRIBUTE_PCI_BUS_ID: PCI bus identifier of the device; - * - ::CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID: PCI device (also known as slot) identifier - * of the device; - * - ::CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID: PCI domain identifier of the device - * - ::CU_DEVICE_ATTRIBUTE_TCC_DRIVER: 1 if the device is using a TCC driver. TCC - * is only available on Tesla hardware running Windows Vista or later; - * - ::CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE: Peak memory clock frequency in kilohertz; - * - ::CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH: Global memory bus width in bits; - * - ::CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE: Size of L2 cache in bytes. 0 if the device doesn't have L2 cache; - * - ::CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR: Maximum resident threads per multiprocessor; - * - ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING: 1 if the device shares a unified address space with - * the host, or 0 if not; - * - ::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR: Major compute capability version number; - * - ::CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR: Minor compute capability version number; - * - ::CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED: 1 if device supports caching globals - * in L1 cache, 0 if caching globals in L1 cache is not supported by the device; - * - ::CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED: 1 if device supports caching locals - * in L1 cache, 0 if caching locals in L1 cache is not supported by the device; - * - ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR: Maximum amount of - * shared memory available to a multiprocessor in bytes; this amount is shared - * by all thread blocks simultaneously resident on a multiprocessor; - * - ::CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR: Maximum number of 32-bit - * registers available to a multiprocessor; this number is shared by all thread - * blocks simultaneously resident on a multiprocessor; - * - ::CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY: 1 if device supports allocating managed memory - * on this system, 0 if allocating managed memory is not supported by the device on this system. - * - ::CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD: 1 if device is on a multi-GPU board, 0 if not. - * - ::CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID: Unique identifier for a group of devices - * associated with the same board. Devices on the same multi-GPU board will share the same identifier. - * - ::CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED: 1 if Link between the device and the host - * supports native atomic operations. - * - ::CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO: Ratio of single precision performance - * (in floating-point operations per second) to double precision performance. - * - ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS: Device supports coherently accessing - * pageable memory without calling cudaHostRegister on it. - * - ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS: Device can coherently access managed memory - * concurrently with the CPU. - * - ::CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED: Device supports Compute Preemption. 
- * - ::CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM: Device can access host registered - * memory at the same virtual address as the CPU. - * - ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN: The maximum per block shared memory size - * supported on this device. This is the maximum value that can be opted into when using the cuFuncSetAttribute() call. - * For more details see ::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES - * - ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES: Device accesses pageable memory via the host's - * page tables. - * - ::CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST: The host can directly access managed memory on the device without migration. - * - ::CU_DEVICE_ATTRIBUTE_VIRTUAL_MEMORY_MANAGEMENT_SUPPORTED: Device supports virtual memory management APIs like ::cuMemAddressReserve, ::cuMemCreate, ::cuMemMap and related APIs - * - ::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR_SUPPORTED: Device supports exporting memory to a posix file descriptor with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate - * - ::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_HANDLE_SUPPORTED: Device supports exporting memory to a Win32 NT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate - * - ::CU_DEVICE_ATTRIBUTE_HANDLE_TYPE_WIN32_KMT_HANDLE_SUPPORTED: Device supports exporting memory to a Win32 KMT handle with ::cuMemExportToShareableHandle, if requested via ::cuMemCreate - * - ::CU_DEVICE_ATTRIBUTE_MAX_PERSISTING_L2_CACHE_SIZE: Maximum L2 persisting lines capacity setting in bytes. - * - ::CU_DEVICE_ATTRIBUTE_MAX_ACCESS_POLICY_WINDOW_SIZE: Maximum value of CUaccessPolicyWindow::num_bytes. - * - ::CU_DEVICE_ATTRIBUTE_MAX_BLOCKS_PER_MULTIPROCESSOR: Maximum number of thread blocks that can reside on a multiprocessor. - * - ::CU_DEVICE_ATTRIBUTE_GENERIC_COMPRESSION_SUPPORTED: Device supports compressible memory allocation via ::cuMemCreate - * - ::CU_DEVICE_ATTRIBUTE_RESERVED_SHARED_MEMORY_PER_BLOCK: Amount of shared memory per block reserved by CUDA driver in bytes. - * - ::CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED: Device supports using the ::cuMemHostRegister flag CU_MEMHOSTREGISTER_READ_ONLY to register memory that must be mapped as read-only to the GPU - * - ::CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED: Device supports using the ::cuMemAllocAsync and ::cuMemPool family of APIs - * - * \param pi - Returned device attribute value - * \param attrib - Device attribute to query - * \param dev - Device handle - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_DEVICE - * \notefnerr - * - * \sa - * ::cuDeviceGetCount, - * ::cuDeviceGetName, - * ::cuDeviceGetUuid, - * ::cuDeviceGet, - * ::cuDeviceTotalMem, - * ::cuDeviceGetExecAffinitySupport, - * ::cudaDeviceGetAttribute, - * ::cudaGetDeviceProperties - */ -CUresult CUDAAPI cuDeviceGetAttribute(int *pi, CUdevice_attribute attrib, CUdevice dev); - -/** - * \brief Return NvSciSync attributes that this device can support. - * - * Returns in \p nvSciSyncAttrList, the properties of NvSciSync that - * this CUDA device, \p dev can support. The returned \p nvSciSyncAttrList - * can be used to create an NvSciSync object that matches this device's capabilities. - * - * If NvSciSyncAttrKey_RequiredPerm field in \p nvSciSyncAttrList is - * already set this API will return ::CUDA_ERROR_INVALID_VALUE.
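A minimal sketch of how the attribute query above is typically used, assuming ::cuInit() succeeds and device ordinal 0 exists on the machine:

\code
#include <cuda.h>
#include <stdio.h>

int main(void) {
    /* Initialize the driver API and grab the first device. */
    if (cuInit(0) != CUDA_SUCCESS) return 1;
    CUdevice dev;
    if (cuDeviceGet(&dev, 0) != CUDA_SUCCESS) return 1;

    /* Query two of the attributes documented above. */
    int maxThreads = 0, smCount = 0;
    cuDeviceGetAttribute(&maxThreads, CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK, dev);
    cuDeviceGetAttribute(&smCount, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, dev);
    printf("max threads per block: %d, multiprocessors: %d\n", maxThreads, smCount);
    return 0;
}
\endcode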
- * - * The applications should set \p nvSciSyncAttrList to a valid - * NvSciSyncAttrList failing which this API will return - * ::CUDA_ERROR_INVALID_HANDLE. - * - * The \p flags controls how applications intends to use - * the NvSciSync created from the \p nvSciSyncAttrList. The valid flags are: - * - ::CUDA_NVSCISYNC_ATTR_SIGNAL, specifies that the applications intends to - * signal an NvSciSync on this CUDA device. - * - ::CUDA_NVSCISYNC_ATTR_WAIT, specifies that the applications intends to - * wait on an NvSciSync on this CUDA device. - * - * At least one of these flags must be set, failing which the API - * returns ::CUDA_ERROR_INVALID_VALUE. Both the flags are orthogonal - * to one another: a developer may set both these flags that allows to - * set both wait and signal specific attributes in the same \p nvSciSyncAttrList. - * - * \param nvSciSyncAttrList - Return NvSciSync attributes supported. - * \param dev - Valid Cuda Device to get NvSciSync attributes for. - * \param flags - flags describing NvSciSync usage. - * - * \return - * - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_INVALID_DEVICE, - * ::CUDA_ERROR_NOT_SUPPORTED, - * ::CUDA_ERROR_OUT_OF_MEMORY - * - * \sa - * ::cuImportExternalSemaphore, - * ::cuDestroyExternalSemaphore, - * ::cuSignalExternalSemaphoresAsync, - * ::cuWaitExternalSemaphoresAsync - */ -CUresult CUDAAPI cuDeviceGetNvSciSyncAttributes(void *nvSciSyncAttrList, CUdevice dev, int flags); - -/** - * \brief Sets the current memory pool of a device - * - * The memory pool must be local to the specified device. - * ::cuMemAllocAsync allocates from the current mempool of the provided stream's device. - * By default, a device's current memory pool is its default memory pool. - * - * \note Use ::cuMemAllocFromPoolAsync to specify asynchronous allocations from a device different - * than the one the stream runs on. - * - * \returns - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuDeviceGetDefaultMemPool, ::cuDeviceGetMemPool, ::cuMemPoolCreate, ::cuMemPoolDestroy, ::cuMemAllocFromPoolAsync - */ -CUresult CUDAAPI cuDeviceSetMemPool(CUdevice dev, CUmemoryPool pool); - -/** - * \brief Gets the current mempool for a device - * - * Returns the last pool provided to ::cuDeviceSetMemPool for this device - * or the device's default memory pool if ::cuDeviceSetMemPool has never been called. - * By default the current mempool is the default mempool for a device. - * Otherwise the returned pool must have been set with ::cuDeviceSetMemPool. - * - * \returns - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuDeviceGetDefaultMemPool, ::cuMemPoolCreate, ::cuDeviceSetMemPool - */ -CUresult CUDAAPI cuDeviceGetMemPool(CUmemoryPool *pool, CUdevice dev); - -/** - * \brief Returns the default mempool of a device - * - * The default mempool of a device contains device memory from that device. 
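A minimal sketch of the memory-pool calls above: fetch the device's default pool, make it current (a no-op when it already is), and allocate from it on a stream. It assumes a context is already current on the calling thread and that the device reports ::CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED.

\code
#include <cuda.h>

/* Allocate from the device's current (default) memory pool on a stream,
 * then free asynchronously and wait for the stream to drain. */
CUresult pool_alloc_demo(CUdevice dev, CUstream stream) {
    CUmemoryPool pool;
    CUresult err = cuDeviceGetDefaultMemPool(&pool, dev);
    if (err != CUDA_SUCCESS) return err;

    /* Setting the default pool as current is shown only for illustration. */
    err = cuDeviceSetMemPool(dev, pool);
    if (err != CUDA_SUCCESS) return err;

    CUdeviceptr ptr;
    err = cuMemAllocAsync(&ptr, 1 << 20, stream);   /* 1 MiB from the current pool */
    if (err != CUDA_SUCCESS) return err;
    err = cuMemFreeAsync(ptr, stream);
    if (err != CUDA_SUCCESS) return err;
    return cuStreamSynchronize(stream);
}
\endcode

Allocations made this way come from the current pool of the stream's device, so swapping the pool with ::cuDeviceSetMemPool changes where subsequent ::cuMemAllocAsync calls draw memory from.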
- * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_DEVICE, - * ::CUDA_ERROR_NOT_SUPPORTED - * \notefnerr - * - * \sa ::cuMemAllocAsync, ::cuMemPoolTrimTo, ::cuMemPoolGetAttribute, ::cuMemPoolSetAttribute, cuMemPoolSetAccess, ::cuDeviceGetMemPool, ::cuMemPoolCreate - */ -CUresult CUDAAPI cuDeviceGetDefaultMemPool(CUmemoryPool *pool_out, CUdevice dev); - -/** - * \brief Blocks until remote writes are visible to the specified scope - * - * Blocks until GPUDirect RDMA writes to the target context via mappings - * created through APIs like nvidia_p2p_get_pages (see - * https://docs.nvidia.com/cuda/gpudirect-rdma for more information), are - * visible to the specified scope. - * - * If the scope equals or lies within the scope indicated by - * ::CU_DEVICE_ATTRIBUTE_GPU_DIRECT_RDMA_WRITES_ORDERING, the call - * will be a no-op and can be safely omitted for performance. This can be - * determined by comparing the numerical values between the two enums, with - * smaller scopes having smaller values. - * - * Users may query support for this API via - * ::CU_DEVICE_ATTRIBUTE_FLUSH_FLUSH_GPU_DIRECT_RDMA_OPTIONS. - * - * \param target - The target of the operation, see ::CUflushGPUDirectRDMAWritesTarget - * \param scope - The scope of the operation, see ::CUflushGPUDirectRDMAWritesScope - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * \notefnerr - * - */ -CUresult CUDAAPI cuFlushGPUDirectRDMAWrites(CUflushGPUDirectRDMAWritesTarget target, CUflushGPUDirectRDMAWritesScope scope); - -/** @} */ /* END CUDA_DEVICE */ - -/** - * \defgroup CUDA_DEVICE_DEPRECATED Device Management [DEPRECATED] - * - * ___MANBRIEF___ deprecated device management functions of the low-level CUDA - * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the device management functions of the low-level - * CUDA driver application programming interface. - * - * @{ - */ - -/** - * \brief Returns properties for a selected device - * - * \deprecated - * - * This function was deprecated as of CUDA 5.0 and replaced by ::cuDeviceGetAttribute(). - * - * Returns in \p *prop the properties of device \p dev. 
The ::CUdevprop - * structure is defined as: - * - * \code - typedef struct CUdevprop_st { - int maxThreadsPerBlock; - int maxThreadsDim[3]; - int maxGridSize[3]; - int sharedMemPerBlock; - int totalConstantMemory; - int SIMDWidth; - int memPitch; - int regsPerBlock; - int clockRate; - int textureAlign - } CUdevprop; - * \endcode - * where: - * - * - ::maxThreadsPerBlock is the maximum number of threads per block; - * - ::maxThreadsDim[3] is the maximum sizes of each dimension of a block; - * - ::maxGridSize[3] is the maximum sizes of each dimension of a grid; - * - ::sharedMemPerBlock is the total amount of shared memory available per - * block in bytes; - * - ::totalConstantMemory is the total amount of constant memory available on - * the device in bytes; - * - ::SIMDWidth is the warp size; - * - ::memPitch is the maximum pitch allowed by the memory copy functions that - * involve memory regions allocated through ::cuMemAllocPitch(); - * - ::regsPerBlock is the total number of registers available per block; - * - ::clockRate is the clock frequency in kilohertz; - * - ::textureAlign is the alignment requirement; texture base addresses that - * are aligned to ::textureAlign bytes do not need an offset applied to - * texture fetches. - * - * \param prop - Returned properties of device - * \param dev - Device to get properties for - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_DEVICE - * \notefnerr - * - * \sa - * ::cuDeviceGetAttribute, - * ::cuDeviceGetCount, - * ::cuDeviceGetName, - * ::cuDeviceGetUuid, - * ::cuDeviceGet, - * ::cuDeviceTotalMem - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuDeviceGetProperties(CUdevprop *prop, CUdevice dev); - -/** - * \brief Returns the compute capability of the device - * - * \deprecated - * - * This function was deprecated as of CUDA 5.0 and its functionality superseded - * by ::cuDeviceGetAttribute(). - * - * Returns in \p *major and \p *minor the major and minor revision numbers that - * define the compute capability of the device \p dev. - * - * \param major - Major revision number - * \param minor - Minor revision number - * \param dev - Device handle - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_DEVICE - * \notefnerr - * - * \sa - * ::cuDeviceGetAttribute, - * ::cuDeviceGetCount, - * ::cuDeviceGetName, - * ::cuDeviceGetUuid, - * ::cuDeviceGet, - * ::cuDeviceTotalMem - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuDeviceComputeCapability(int *major, int *minor, CUdevice dev); - -/** @} */ /* END CUDA_DEVICE_DEPRECATED */ - -/** - * \defgroup CUDA_PRIMARY_CTX Primary Context Management - * - * ___MANBRIEF___ primary context management functions of the low-level CUDA driver - * API (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the primary context management functions of the low-level - * CUDA driver application programming interface. - * - * The primary context is unique per device and shared with the CUDA runtime API. - * These functions allow integration with other libraries using CUDA. - * - * @{ - */ - -/** - * \brief Retain the primary context on the GPU - * - * Retains the primary context on the device. 
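A minimal sketch of the retain/release pattern this function implies, assuming ::cuInit(0) has already been called:

\code
#include <cuda.h>

/* Retain the primary context, make it current, do some work, and release it.
 * Every successful retain must be balanced by a release. */
CUresult use_primary_ctx(CUdevice dev) {
    CUcontext ctx;
    CUresult err = cuDevicePrimaryCtxRetain(&ctx, dev);
    if (err != CUDA_SUCCESS) return err;

    /* The retained context is not pushed automatically; make it current. */
    err = cuCtxSetCurrent(ctx);
    if (err == CUDA_SUCCESS) {
        /* ... launch work here ... */
        err = cuCtxSynchronize();
    }

    cuDevicePrimaryCtxRelease(dev);
    return err;
}
\endcode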
- * Once the user successfully retains the primary context, the primary context - * will be active and available to the user until the user releases it - * with ::cuDevicePrimaryCtxRelease() or resets it with ::cuDevicePrimaryCtxReset(). - * Unlike ::cuCtxCreate() the newly retained context is not pushed onto the stack. - * - * Retaining the primary context for the first time will fail with ::CUDA_ERROR_UNKNOWN - * if the compute mode of the device is ::CU_COMPUTEMODE_PROHIBITED. The function - * ::cuDeviceGetAttribute() can be used with ::CU_DEVICE_ATTRIBUTE_COMPUTE_MODE to - * determine the compute mode of the device. - * The nvidia-smi tool can be used to set the compute mode for - * devices. Documentation for nvidia-smi can be obtained by passing a - * -h option to it. - * - * Please note that the primary context always supports pinned allocations. Other - * flags can be specified by ::cuDevicePrimaryCtxSetFlags(). - * - * \param pctx - Returned context handle of the new context - * \param dev - Device for which primary context is requested - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_DEVICE, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_OUT_OF_MEMORY, - * ::CUDA_ERROR_UNKNOWN - * \notefnerr - * - * \sa ::cuDevicePrimaryCtxRelease, - * ::cuDevicePrimaryCtxSetFlags, - * ::cuCtxCreate, - * ::cuCtxGetApiVersion, - * ::cuCtxGetCacheConfig, - * ::cuCtxGetDevice, - * ::cuCtxGetFlags, - * ::cuCtxGetLimit, - * ::cuCtxPopCurrent, - * ::cuCtxPushCurrent, - * ::cuCtxSetCacheConfig, - * ::cuCtxSetLimit, - * ::cuCtxSynchronize - */ -CUresult CUDAAPI cuDevicePrimaryCtxRetain(CUcontext *pctx, CUdevice dev); - -/** - * \brief Release the primary context on the GPU - * - * Releases the primary context interop on the device. - * A retained context should always be released once the user is done using - * it. The context is automatically reset once the last reference to it is - * released. This behavior is different when the primary context was retained - * by the CUDA runtime from CUDA 4.0 and earlier. In this case, the primary - * context remains always active. - * - * Releasing a primary context that has not been previously retained will - * fail with ::CUDA_ERROR_INVALID_CONTEXT. - * - * Please note that unlike ::cuCtxDestroy() this method does not pop the context - * from stack in any circumstances. - * - * \param dev - Device which primary context is released - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_DEVICE, - * ::CUDA_ERROR_INVALID_CONTEXT - * \notefnerr - * - * \sa ::cuDevicePrimaryCtxRetain, - * ::cuCtxDestroy, - * ::cuCtxGetApiVersion, - * ::cuCtxGetCacheConfig, - * ::cuCtxGetDevice, - * ::cuCtxGetFlags, - * ::cuCtxGetLimit, - * ::cuCtxPopCurrent, - * ::cuCtxPushCurrent, - * ::cuCtxSetCacheConfig, - * ::cuCtxSetLimit, - * ::cuCtxSynchronize - */ -CUresult CUDAAPI cuDevicePrimaryCtxRelease(CUdevice dev); - -/** - * \brief Set flags for the primary context - * - * Sets the flags for the primary context on the device overwriting perviously - * set ones. - * - * The three LSBs of the \p flags parameter can be used to control how the OS - * thread, which owns the CUDA context at the time of an API call, interacts - * with the OS scheduler when waiting for results from the GPU. Only one of - * the scheduling flags can be set when creating a context. 
- * - * - ::CU_CTX_SCHED_SPIN: Instruct CUDA to actively spin when waiting for - * results from the GPU. This can decrease latency when waiting for the GPU, - * but may lower the performance of CPU threads if they are performing work in - * parallel with the CUDA thread. - * - * - ::CU_CTX_SCHED_YIELD: Instruct CUDA to yield its thread when waiting for - * results from the GPU. This can increase latency when waiting for the GPU, - * but can increase the performance of CPU threads performing work in parallel - * with the GPU. - * - * - ::CU_CTX_SCHED_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a - * synchronization primitive when waiting for the GPU to finish work. - * - * - ::CU_CTX_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a - * synchronization primitive when waiting for the GPU to finish work.
- * Deprecated: This flag was deprecated as of CUDA 4.0 and was - * replaced with ::CU_CTX_SCHED_BLOCKING_SYNC. - * - * - ::CU_CTX_SCHED_AUTO: The default value if the \p flags parameter is zero, - * uses a heuristic based on the number of active CUDA contexts in the - * process \e C and the number of logical processors in the system \e P. If - * \e C > \e P, then CUDA will yield to other OS threads when waiting for - * the GPU (::CU_CTX_SCHED_YIELD), otherwise CUDA will not yield while - * waiting for results and actively spin on the processor (::CU_CTX_SCHED_SPIN). - * Additionally, on Tegra devices, ::CU_CTX_SCHED_AUTO uses a heuristic based on - * the power profile of the platform and may choose ::CU_CTX_SCHED_BLOCKING_SYNC - * for low-powered devices. - * - * - ::CU_CTX_LMEM_RESIZE_TO_MAX: Instruct CUDA to not reduce local memory - * after resizing local memory for a kernel. This can prevent thrashing by - * local memory allocations when launching many kernels with high local - * memory usage at the cost of potentially increased memory usage.
- * Deprecated: This flag is deprecated and the behavior enabled - * by this flag is now the default and cannot be disabled. - * - * \param dev - Device for which the primary context flags are set - * \param flags - New flags for the device - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_DEVICE, - * ::CUDA_ERROR_INVALID_VALUE, - * \notefnerr - * - * \sa ::cuDevicePrimaryCtxRetain, - * ::cuDevicePrimaryCtxGetState, - * ::cuCtxCreate, - * ::cuCtxGetFlags, - * ::cudaSetDeviceFlags - */ -CUresult CUDAAPI cuDevicePrimaryCtxSetFlags(CUdevice dev, unsigned int flags); - -/** - * \brief Get the state of the primary context - * - * Returns in \p *flags the flags for the primary context of \p dev, and in - * \p *active whether it is active. See ::cuDevicePrimaryCtxSetFlags for flag - * values. - * - * \param dev - Device to get primary context flags for - * \param flags - Pointer to store flags - * \param active - Pointer to store context state; 0 = inactive, 1 = active - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_DEVICE, - * ::CUDA_ERROR_INVALID_VALUE, - * \notefnerr - * - * \sa - * ::cuDevicePrimaryCtxSetFlags, - * ::cuCtxGetFlags, - * ::cudaGetDeviceFlags - */ -CUresult CUDAAPI cuDevicePrimaryCtxGetState(CUdevice dev, unsigned int *flags, int *active); - -/** - * \brief Destroy all allocations and reset all state on the primary context - * - * Explicitly destroys and cleans up all resources associated with the current - * device in the current process. - * - * Note that it is responsibility of the calling function to ensure that no - * other module in the process is using the device any more. For that reason - * it is recommended to use ::cuDevicePrimaryCtxRelease() in most cases. - * However it is safe for other modules to call ::cuDevicePrimaryCtxRelease() - * even after resetting the device. - * Resetting the primary context does not release it, an application that has - * retained the primary context should explicitly release its usage. - * - * \param dev - Device for which primary context is destroyed - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_DEVICE, - * ::CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE - * \notefnerr - * - * \sa ::cuDevicePrimaryCtxRetain, - * ::cuDevicePrimaryCtxRelease, - * ::cuCtxGetApiVersion, - * ::cuCtxGetCacheConfig, - * ::cuCtxGetDevice, - * ::cuCtxGetFlags, - * ::cuCtxGetLimit, - * ::cuCtxPopCurrent, - * ::cuCtxPushCurrent, - * ::cuCtxSetCacheConfig, - * ::cuCtxSetLimit, - * ::cuCtxSynchronize, - * ::cudaDeviceReset - */ -CUresult CUDAAPI cuDevicePrimaryCtxReset(CUdevice dev); - -/** @} */ /* END CUDA_PRIMARY_CTX */ - -/** - * \brief Returns information about the execution affinity support of the device. - * - * Returns in \p *pi whether execution affinity type \p type is supported by device \p dev. 
- * The supported types are: - * - ::CU_EXEC_AFFINITY_TYPE_SM_COUNT: 1 if context with limited SMs is supported by the device, - * or 0 if not; - * - * \param pi - 1 if the execution affinity type \p type is supported by the device, or 0 if not - * \param type - Execution affinity type to query - * \param dev - Device handle - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_DEVICE - * \notefnerr - * - * \sa - * ::cuDeviceGetAttribute, - * ::cuDeviceGetCount, - * ::cuDeviceGetName, - * ::cuDeviceGetUuid, - * ::cuDeviceGet, - * ::cuDeviceTotalMem - */ -CUresult CUDAAPI cuDeviceGetExecAffinitySupport(int *pi, CUexecAffinityType type, CUdevice dev); - -/** - * \defgroup CUDA_CTX Context Management - * - * ___MANBRIEF___ context management functions of the low-level CUDA driver - * API (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the context management functions of the low-level - * CUDA driver application programming interface. - * - * Please note that some functions are described in - * \ref CUDA_PRIMARY_CTX "Primary Context Management" section. - * - * @{ - */ - -/** - * \brief Create a CUDA context - * - * \note In most cases it is recommended to use ::cuDevicePrimaryCtxRetain. - * - * Creates a new CUDA context and associates it with the calling thread. The - * \p flags parameter is described below. The context is created with a usage - * count of 1 and the caller of ::cuCtxCreate() must call ::cuCtxDestroy() or - * when done using the context. If a context is already current to the thread, - * it is supplanted by the newly created context and may be restored by a subsequent - * call to ::cuCtxPopCurrent(). - * - * The three LSBs of the \p flags parameter can be used to control how the OS - * thread, which owns the CUDA context at the time of an API call, interacts - * with the OS scheduler when waiting for results from the GPU. Only one of - * the scheduling flags can be set when creating a context. - * - * - ::CU_CTX_SCHED_SPIN: Instruct CUDA to actively spin when waiting for - * results from the GPU. This can decrease latency when waiting for the GPU, - * but may lower the performance of CPU threads if they are performing work in - * parallel with the CUDA thread. - * - * - ::CU_CTX_SCHED_YIELD: Instruct CUDA to yield its thread when waiting for - * results from the GPU. This can increase latency when waiting for the GPU, - * but can increase the performance of CPU threads performing work in parallel - * with the GPU. - * - * - ::CU_CTX_SCHED_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a - * synchronization primitive when waiting for the GPU to finish work. - * - * - ::CU_CTX_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a - * synchronization primitive when waiting for the GPU to finish work.
- * Deprecated: This flag was deprecated as of CUDA 4.0 and was - * replaced with ::CU_CTX_SCHED_BLOCKING_SYNC. - * - * - ::CU_CTX_SCHED_AUTO: The default value if the \p flags parameter is zero, - * uses a heuristic based on the number of active CUDA contexts in the - * process \e C and the number of logical processors in the system \e P. If - * \e C > \e P, then CUDA will yield to other OS threads when waiting for - * the GPU (::CU_CTX_SCHED_YIELD), otherwise CUDA will not yield while - * waiting for results and actively spin on the processor (::CU_CTX_SCHED_SPIN). - * Additionally, on Tegra devices, ::CU_CTX_SCHED_AUTO uses a heuristic based on - * the power profile of the platform and may choose ::CU_CTX_SCHED_BLOCKING_SYNC - * for low-powered devices. - * - * - ::CU_CTX_MAP_HOST: Instruct CUDA to support mapped pinned allocations. - * This flag must be set in order to allocate pinned host memory that is - * accessible to the GPU. - * - * - ::CU_CTX_LMEM_RESIZE_TO_MAX: Instruct CUDA to not reduce local memory - * after resizing local memory for a kernel. This can prevent thrashing by - * local memory allocations when launching many kernels with high local - * memory usage at the cost of potentially increased memory usage.
- * Deprecated: This flag is deprecated and the behavior enabled - * by this flag is now the default and cannot be disabled. - * Instead, the per-thread stack size can be controlled with ::cuCtxSetLimit(). - * - * Context creation will fail with ::CUDA_ERROR_UNKNOWN if the compute mode of - * the device is ::CU_COMPUTEMODE_PROHIBITED. The function ::cuDeviceGetAttribute() - * can be used with ::CU_DEVICE_ATTRIBUTE_COMPUTE_MODE to determine the - * compute mode of the device. The nvidia-smi tool can be used to set - * the compute mode for * devices. - * Documentation for nvidia-smi can be obtained by passing a - * -h option to it. - * - * \param pctx - Returned context handle of the new context - * \param flags - Context creation flags - * \param dev - Device to create context on - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_DEVICE, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_OUT_OF_MEMORY, - * ::CUDA_ERROR_UNKNOWN - * \notefnerr - * - * \sa ::cuCtxDestroy, - * ::cuCtxGetApiVersion, - * ::cuCtxGetCacheConfig, - * ::cuCtxGetDevice, - * ::cuCtxGetFlags, - * ::cuCtxGetLimit, - * ::cuCtxPopCurrent, - * ::cuCtxPushCurrent, - * ::cuCtxSetCacheConfig, - * ::cuCtxSetLimit, - * ::cuCtxSynchronize - */ -CUresult CUDAAPI cuCtxCreate(CUcontext *pctx, unsigned int flags, CUdevice dev); - -/** - * \brief Create a CUDA context with execution affinity - * - * Creates a new CUDA context with execution affinity and associates it with - * the calling thread. The \p paramsArray and \p flags parameter are described below. - * The context is created with a usage count of 1 and the caller of ::cuCtxCreate() must - * call ::cuCtxDestroy() or when done using the context. If a context is already - * current to the thread, it is supplanted by the newly created context and may - * be restored by a subsequent call to ::cuCtxPopCurrent(). - * - * The type and the amount of execution resource the context can use is limited by \p paramsArray - * and \p numParams. The \p paramsArray is an array of \p CUexecAffinityParam and the \p numParams - * describes the size of the array. If two \p CUexecAffinityParam in the array have the same type, - * the latter execution affinity parameter overrides the former execution affinity parameter. - * The supported execution affinity types are: - * - ::CU_EXEC_AFFINITY_TYPE_SM_COUNT limits the portion of SMs that the context can use. The portion - * of SMs is specified as the number of SMs via \p CUexecAffinitySmCount. This limit will be internally - * rounded up to the next hardware-supported amount. Hence, it is imperative to query the actual execution - * affinity of the context via \p cuCtxGetExecAffinity after context creation. Currently, this attribute - * is only supported under Volta+ MPS. - * - * The three LSBs of the \p flags parameter can be used to control how the OS - * thread, which owns the CUDA context at the time of an API call, interacts - * with the OS scheduler when waiting for results from the GPU. Only one of - * the scheduling flags can be set when creating a context. - * - * - ::CU_CTX_SCHED_SPIN: Instruct CUDA to actively spin when waiting for - * results from the GPU. This can decrease latency when waiting for the GPU, - * but may lower the performance of CPU threads if they are performing work in - * parallel with the CUDA thread. - * - * - ::CU_CTX_SCHED_YIELD: Instruct CUDA to yield its thread when waiting for - * results from the GPU. 
This can increase latency when waiting for the GPU, - * but can increase the performance of CPU threads performing work in parallel - * with the GPU. - * - * - ::CU_CTX_SCHED_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a - * synchronization primitive when waiting for the GPU to finish work. - * - * - ::CU_CTX_BLOCKING_SYNC: Instruct CUDA to block the CPU thread on a - * synchronization primitive when waiting for the GPU to finish work.
- * Deprecated: This flag was deprecated as of CUDA 4.0 and was - * replaced with ::CU_CTX_SCHED_BLOCKING_SYNC. - * - * - ::CU_CTX_SCHED_AUTO: The default value if the \p flags parameter is zero, - * uses a heuristic based on the number of active CUDA contexts in the - * process \e C and the number of logical processors in the system \e P. If - * \e C > \e P, then CUDA will yield to other OS threads when waiting for - * the GPU (::CU_CTX_SCHED_YIELD), otherwise CUDA will not yield while - * waiting for results and actively spin on the processor (::CU_CTX_SCHED_SPIN). - * Additionally, on Tegra devices, ::CU_CTX_SCHED_AUTO uses a heuristic based on - * the power profile of the platform and may choose ::CU_CTX_SCHED_BLOCKING_SYNC - * for low-powered devices. - * - * - ::CU_CTX_MAP_HOST: Instruct CUDA to support mapped pinned allocations. - * This flag must be set in order to allocate pinned host memory that is - * accessible to the GPU. - * - * - ::CU_CTX_LMEM_RESIZE_TO_MAX: Instruct CUDA to not reduce local memory - * after resizing local memory for a kernel. This can prevent thrashing by - * local memory allocations when launching many kernels with high local - * memory usage at the cost of potentially increased memory usage.
- * Deprecated: This flag is deprecated and the behavior enabled - * by this flag is now the default and cannot be disabled. - * Instead, the per-thread stack size can be controlled with ::cuCtxSetLimit(). - * - * Context creation will fail with ::CUDA_ERROR_UNKNOWN if the compute mode of - * the device is ::CU_COMPUTEMODE_PROHIBITED. The function ::cuDeviceGetAttribute() - * can be used with ::CU_DEVICE_ATTRIBUTE_COMPUTE_MODE to determine the - * compute mode of the device. The nvidia-smi tool can be used to set - * the compute mode for * devices. - * Documentation for nvidia-smi can be obtained by passing a - * -h option to it. - * - * \param pctx - Returned context handle of the new context - * \param paramsArray - Execution affinity parameters - * \param numParams - Number of execution affinity parameters - * \param flags - Context creation flags - * \param dev - Device to create context on - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_DEVICE, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_OUT_OF_MEMORY, - * ::CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY, - * ::CUDA_ERROR_UNKNOWN - * \notefnerr - * - * \sa ::cuCtxDestroy, - * ::cuCtxGetApiVersion, - * ::cuCtxGetCacheConfig, - * ::cuCtxGetDevice, - * ::cuCtxGetFlags, - * ::cuCtxGetLimit, - * ::cuCtxPopCurrent, - * ::cuCtxPushCurrent, - * ::cuCtxSetCacheConfig, - * ::cuCtxSetLimit, - * ::cuCtxSynchronize, - * ::CUexecAffinityParam - */ -CUresult CUDAAPI cuCtxCreate_v3(CUcontext *pctx, CUexecAffinityParam *paramsArray, int numParams, unsigned int flags, CUdevice dev); - -/** - * \brief Destroy a CUDA context - * - * Destroys the CUDA context specified by \p ctx. The context \p ctx will be - * destroyed regardless of how many threads it is current to. - * It is the responsibility of the calling function to ensure that no API - * call issues using \p ctx while ::cuCtxDestroy() is executing. - * - * If \p ctx is current to the calling thread then \p ctx will also be - * popped from the current thread's context stack (as though ::cuCtxPopCurrent() - * were called). If \p ctx is current to other threads, then \p ctx will - * remain current to those threads, and attempting to access \p ctx from - * those threads will result in the error ::CUDA_ERROR_CONTEXT_IS_DESTROYED. - * - * \param ctx - Context to destroy - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa ::cuCtxCreate, - * ::cuCtxGetApiVersion, - * ::cuCtxGetCacheConfig, - * ::cuCtxGetDevice, - * ::cuCtxGetFlags, - * ::cuCtxGetLimit, - * ::cuCtxPopCurrent, - * ::cuCtxPushCurrent, - * ::cuCtxSetCacheConfig, - * ::cuCtxSetLimit, - * ::cuCtxSynchronize - */ -CUresult CUDAAPI cuCtxDestroy(CUcontext ctx); - -/** - * \brief Pushes a context on the current CPU thread - * - * Pushes the given context \p ctx onto the CPU thread's stack of current - * contexts. The specified context becomes the CPU thread's current context, so - * all CUDA functions that operate on the current context are affected. - * - * The previous current context may be made current again by calling - * ::cuCtxDestroy() or ::cuCtxPopCurrent(). 
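A minimal sketch of the push/pop discipline described here: make a context current for a block of driver calls, then restore whatever was current before. It assumes `ctx` is a valid context created or retained elsewhere.

\code
#include <cuda.h>

/* Temporarily make `ctx` current on this thread, then pop it again. */
CUresult with_context(CUcontext ctx) {
    CUresult err = cuCtxPushCurrent(ctx);
    if (err != CUDA_SUCCESS) return err;

    /* ... driver API calls here operate on `ctx` ... */

    CUcontext popped;
    err = cuCtxPopCurrent(&popped);   /* `popped` is `ctx` again */
    return err;
}
\endcode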
- * - * \param ctx - Context to push - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa ::cuCtxCreate, - * ::cuCtxDestroy, - * ::cuCtxGetApiVersion, - * ::cuCtxGetCacheConfig, - * ::cuCtxGetDevice, - * ::cuCtxGetFlags, - * ::cuCtxGetLimit, - * ::cuCtxPopCurrent, - * ::cuCtxSetCacheConfig, - * ::cuCtxSetLimit, - * ::cuCtxSynchronize - */ -CUresult CUDAAPI cuCtxPushCurrent(CUcontext ctx); - -/** - * \brief Pops the current CUDA context from the current CPU thread. - * - * Pops the current CUDA context from the CPU thread and passes back the - * old context handle in \p *pctx. That context may then be made current - * to a different CPU thread by calling ::cuCtxPushCurrent(). - * - * If a context was current to the CPU thread before ::cuCtxCreate() or - * ::cuCtxPushCurrent() was called, this function makes that context current to - * the CPU thread again. - * - * \param pctx - Returned new context handle - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT - * \notefnerr - * - * \sa ::cuCtxCreate, - * ::cuCtxDestroy, - * ::cuCtxGetApiVersion, - * ::cuCtxGetCacheConfig, - * ::cuCtxGetDevice, - * ::cuCtxGetFlags, - * ::cuCtxGetLimit, - * ::cuCtxPushCurrent, - * ::cuCtxSetCacheConfig, - * ::cuCtxSetLimit, - * ::cuCtxSynchronize - */ -CUresult CUDAAPI cuCtxPopCurrent(CUcontext *pctx); - -/** - * \brief Binds the specified CUDA context to the calling CPU thread - * - * Binds the specified CUDA context to the calling CPU thread. - * If \p ctx is NULL then the CUDA context previously bound to the - * calling CPU thread is unbound and ::CUDA_SUCCESS is returned. - * - * If there exists a CUDA context stack on the calling CPU thread, this - * will replace the top of that stack with \p ctx. - * If \p ctx is NULL then this will be equivalent to popping the top - * of the calling CPU thread's CUDA context stack (or a no-op if the - * calling CPU thread's CUDA context stack is empty). - * - * \param ctx - Context to bind to the calling CPU thread - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT - * \notefnerr - * - * \sa - * ::cuCtxGetCurrent, - * ::cuCtxCreate, - * ::cuCtxDestroy, - * ::cudaSetDevice - */ -CUresult CUDAAPI cuCtxSetCurrent(CUcontext ctx); - -/** - * \brief Returns the CUDA context bound to the calling CPU thread. - * - * Returns in \p *pctx the CUDA context bound to the calling CPU thread. - * If no context is bound to the calling CPU thread then \p *pctx is - * set to NULL and ::CUDA_SUCCESS is returned. - * - * \param pctx - Returned context handle - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * \notefnerr - * - * \sa - * ::cuCtxSetCurrent, - * ::cuCtxCreate, - * ::cuCtxDestroy, - * ::cudaGetDevice - */ -CUresult CUDAAPI cuCtxGetCurrent(CUcontext *pctx); - -/** - * \brief Returns the device ID for the current context - * - * Returns in \p *device the ordinal of the current context's device. 
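A small sketch combining ::cuCtxGetCurrent and ::cuCtxGetDevice to report the calling thread's binding, assuming the driver has already been initialized:

\code
#include <cuda.h>
#include <stdio.h>

/* Report which context, if any, is bound to this thread and which device it uses. */
void report_current_context(void) {
    CUcontext ctx = NULL;
    if (cuCtxGetCurrent(&ctx) != CUDA_SUCCESS || ctx == NULL) {
        printf("no context is current on this thread\n");
        return;
    }
    CUdevice dev;
    if (cuCtxGetDevice(&dev) == CUDA_SUCCESS)
        printf("current context %p runs on device ordinal %d\n", (void *)ctx, (int)dev);
}
\endcode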
- * - * \param device - Returned device ID for the current context - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * \notefnerr - * - * \sa ::cuCtxCreate, - * ::cuCtxDestroy, - * ::cuCtxGetApiVersion, - * ::cuCtxGetCacheConfig, - * ::cuCtxGetFlags, - * ::cuCtxGetLimit, - * ::cuCtxPopCurrent, - * ::cuCtxPushCurrent, - * ::cuCtxSetCacheConfig, - * ::cuCtxSetLimit, - * ::cuCtxSynchronize, - * ::cudaGetDevice - */ -CUresult CUDAAPI cuCtxGetDevice(CUdevice *device); - -/** - * \brief Returns the flags for the current context - * - * Returns in \p *flags the flags of the current context. See ::cuCtxCreate - * for flag values. - * - * \param flags - Pointer to store flags of current context - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * \notefnerr - * - * \sa ::cuCtxCreate, - * ::cuCtxGetApiVersion, - * ::cuCtxGetCacheConfig, - * ::cuCtxGetCurrent, - * ::cuCtxGetDevice, - * ::cuCtxGetLimit, - * ::cuCtxGetSharedMemConfig, - * ::cuCtxGetStreamPriorityRange, - * ::cudaGetDeviceFlags - */ -CUresult CUDAAPI cuCtxGetFlags(unsigned int *flags); - -/** - * \brief Block for a context's tasks to complete - * - * Blocks until the device has completed all preceding requested tasks. - * ::cuCtxSynchronize() returns an error if one of the preceding tasks failed. - * If the context was created with the ::CU_CTX_SCHED_BLOCKING_SYNC flag, the - * CPU thread will block until the GPU context has finished its work. - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT - * \notefnerr - * - * \sa ::cuCtxCreate, - * ::cuCtxDestroy, - * ::cuCtxGetApiVersion, - * ::cuCtxGetCacheConfig, - * ::cuCtxGetDevice, - * ::cuCtxGetFlags, - * ::cuCtxGetLimit, - * ::cuCtxPopCurrent, - * ::cuCtxPushCurrent, - * ::cuCtxSetCacheConfig, - * ::cuCtxSetLimit, - * ::cudaDeviceSynchronize - */ -CUresult CUDAAPI cuCtxSynchronize(void); - -/** - * \brief Set resource limits - * - * Setting \p limit to \p value is a request by the application to update - * the current limit maintained by the context. The driver is free to - * modify the requested value to meet h/w requirements (this could be - * clamping to minimum or maximum values, rounding up to nearest element - * size, etc). The application can use ::cuCtxGetLimit() to find out exactly - * what the limit has been set to. - * - * Setting each ::CUlimit has its own specific restrictions, so each is - * discussed here. - * - * - ::CU_LIMIT_STACK_SIZE controls the stack size in bytes of each GPU thread. - * The driver automatically increases the per-thread stack size - * for each kernel launch as needed. This size isn't reset back to the - * original value after each launch. Setting this value will take effect - * immediately, and if necessary, the device will block until all preceding - * requested tasks are complete. - * - * - ::CU_LIMIT_PRINTF_FIFO_SIZE controls the size in bytes of the FIFO used - * by the ::printf() device system call. Setting ::CU_LIMIT_PRINTF_FIFO_SIZE - * must be performed before launching any kernel that uses the ::printf() - * device system call, otherwise ::CUDA_ERROR_INVALID_VALUE will be returned. - * - * - ::CU_LIMIT_MALLOC_HEAP_SIZE controls the size in bytes of the heap used - * by the ::malloc() and ::free() device system calls. 
Setting - * ::CU_LIMIT_MALLOC_HEAP_SIZE must be performed before launching any kernel - * that uses the ::malloc() or ::free() device system calls, otherwise - * ::CUDA_ERROR_INVALID_VALUE will be returned. - * - * - ::CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH controls the maximum nesting depth of - * a grid at which a thread can safely call ::cudaDeviceSynchronize(). Setting - * this limit must be performed before any launch of a kernel that uses the - * device runtime and calls ::cudaDeviceSynchronize() above the default sync - * depth, two levels of grids. Calls to ::cudaDeviceSynchronize() will fail - * with error code ::cudaErrorSyncDepthExceeded if the limitation is - * violated. This limit can be set smaller than the default or up to the maximum - * launch depth of 24. When setting this limit, keep in mind that additional - * levels of sync depth require the driver to reserve large amounts of device - * memory which can no longer be used for user allocations. If these - * reservations of device memory fail, ::cuCtxSetLimit() will return - * ::CUDA_ERROR_OUT_OF_MEMORY, and the limit can be reset to a lower value. - * This limit is only applicable to devices of compute capability 3.5 and - * higher. Attempting to set this limit on devices of compute capability less - * than 3.5 will result in the error ::CUDA_ERROR_UNSUPPORTED_LIMIT being - * returned. - * - * - ::CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT controls the maximum number of - * outstanding device runtime launches that can be made from the current - * context. A grid is outstanding from the point of launch up until the grid - * is known to have been completed. Device runtime launches which violate - * this limitation fail and return ::cudaErrorLaunchPendingCountExceeded when - * ::cudaGetLastError() is called after launch. If more pending launches than - * the default (2048 launches) are needed for a module using the device - * runtime, this limit can be increased. Keep in mind that being able to - * sustain additional pending launches will require the driver to reserve - * larger amounts of device memory upfront which can no longer be used for - * allocations. If these reservations fail, ::cuCtxSetLimit() will return - * ::CUDA_ERROR_OUT_OF_MEMORY, and the limit can be reset to a lower value. - * This limit is only applicable to devices of compute capability 3.5 and - * higher. Attempting to set this limit on devices of compute capability less - * than 3.5 will result in the error ::CUDA_ERROR_UNSUPPORTED_LIMIT being - * returned. - * - * - ::CU_LIMIT_MAX_L2_FETCH_GRANULARITY controls the L2 cache fetch granularity. - * Values can range from 0B to 128B. This is purely a performance hint and - * it can be ignored or clamped depending on the platform. - * - * - ::CU_LIMIT_PERSISTING_L2_CACHE_SIZE controls size in bytes available for - * persisting L2 cache. This is purely a performance hint and it can be - * ignored or clamped depending on the platform.
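Because the driver may round or clamp the requested value, the set-then-query pattern below is the usual way to use ::cuCtxSetLimit. A minimal sketch using ::CU_LIMIT_MALLOC_HEAP_SIZE, assuming a context is current; the 64 MiB figure is an arbitrary example request:

\code
#include <cuda.h>
#include <stdio.h>

/* Request a larger device-side malloc heap, then read back the value the
 * driver actually applied. */
void grow_device_heap(void) {
    if (cuCtxSetLimit(CU_LIMIT_MALLOC_HEAP_SIZE, 64u << 20) != CUDA_SUCCESS)  /* ask for 64 MiB */
        return;
    size_t actual = 0;
    if (cuCtxGetLimit(&actual, CU_LIMIT_MALLOC_HEAP_SIZE) == CUDA_SUCCESS)
        printf("malloc heap limit is now %zu bytes\n", actual);
}
\endcode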
- * - * \param limit - Limit to set - * \param value - Size of limit - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_UNSUPPORTED_LIMIT, - * ::CUDA_ERROR_OUT_OF_MEMORY, - * ::CUDA_ERROR_INVALID_CONTEXT - * \notefnerr - * - * \sa ::cuCtxCreate, - * ::cuCtxDestroy, - * ::cuCtxGetApiVersion, - * ::cuCtxGetCacheConfig, - * ::cuCtxGetDevice, - * ::cuCtxGetFlags, - * ::cuCtxGetLimit, - * ::cuCtxPopCurrent, - * ::cuCtxPushCurrent, - * ::cuCtxSetCacheConfig, - * ::cuCtxSynchronize, - * ::cudaDeviceSetLimit - */ -CUresult CUDAAPI cuCtxSetLimit(CUlimit limit, size_t value); - -/** - * \brief Returns resource limits - * - * Returns in \p *pvalue the current size of \p limit. The supported - * ::CUlimit values are: - * - ::CU_LIMIT_STACK_SIZE: stack size in bytes of each GPU thread. - * - ::CU_LIMIT_PRINTF_FIFO_SIZE: size in bytes of the FIFO used by the - * ::printf() device system call. - * - ::CU_LIMIT_MALLOC_HEAP_SIZE: size in bytes of the heap used by the - * ::malloc() and ::free() device system calls. - * - ::CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH: maximum grid depth at which a thread - * can issue the device runtime call ::cudaDeviceSynchronize() to wait on - * child grid launches to complete. - * - ::CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT: maximum number of outstanding - * device runtime launches that can be made from this context. - * - ::CU_LIMIT_MAX_L2_FETCH_GRANULARITY: L2 cache fetch granularity. - * - ::CU_LIMIT_PERSISTING_L2_CACHE_SIZE: Persisting L2 cache size in bytes - * - * \param limit - Limit to query - * \param pvalue - Returned size of limit - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_UNSUPPORTED_LIMIT - * \notefnerr - * - * \sa ::cuCtxCreate, - * ::cuCtxDestroy, - * ::cuCtxGetApiVersion, - * ::cuCtxGetCacheConfig, - * ::cuCtxGetDevice, - * ::cuCtxGetFlags, - * ::cuCtxPopCurrent, - * ::cuCtxPushCurrent, - * ::cuCtxSetCacheConfig, - * ::cuCtxSetLimit, - * ::cuCtxSynchronize, - * ::cudaDeviceGetLimit - */ -CUresult CUDAAPI cuCtxGetLimit(size_t *pvalue, CUlimit limit); - -/** - * \brief Returns the preferred cache configuration for the current context. - * - * On devices where the L1 cache and shared memory use the same hardware - * resources, this function returns through \p pconfig the preferred cache configuration - * for the current context. This is only a preference. The driver will use - * the requested configuration if possible, but it is free to choose a different - * configuration if required to execute functions. - * - * This will return a \p pconfig of ::CU_FUNC_CACHE_PREFER_NONE on devices - * where the size of the L1 cache and shared memory are fixed. 
- * - * The supported cache configurations are: - * - ::CU_FUNC_CACHE_PREFER_NONE: no preference for shared memory or L1 (default) - * - ::CU_FUNC_CACHE_PREFER_SHARED: prefer larger shared memory and smaller L1 cache - * - ::CU_FUNC_CACHE_PREFER_L1: prefer larger L1 cache and smaller shared memory - * - ::CU_FUNC_CACHE_PREFER_EQUAL: prefer equal sized L1 cache and shared memory - * - * \param pconfig - Returned cache configuration - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa ::cuCtxCreate, - * ::cuCtxDestroy, - * ::cuCtxGetApiVersion, - * ::cuCtxGetDevice, - * ::cuCtxGetFlags, - * ::cuCtxGetLimit, - * ::cuCtxPopCurrent, - * ::cuCtxPushCurrent, - * ::cuCtxSetCacheConfig, - * ::cuCtxSetLimit, - * ::cuCtxSynchronize, - * ::cuFuncSetCacheConfig, - * ::cudaDeviceGetCacheConfig - */ -CUresult CUDAAPI cuCtxGetCacheConfig(CUfunc_cache *pconfig); - -/** - * \brief Sets the preferred cache configuration for the current context. - * - * On devices where the L1 cache and shared memory use the same hardware - * resources, this sets through \p config the preferred cache configuration for - * the current context. This is only a preference. The driver will use - * the requested configuration if possible, but it is free to choose a different - * configuration if required to execute the function. Any function preference - * set via ::cuFuncSetCacheConfig() will be preferred over this context-wide - * setting. Setting the context-wide cache configuration to - * ::CU_FUNC_CACHE_PREFER_NONE will cause subsequent kernel launches to prefer - * to not change the cache configuration unless required to launch the kernel. - * - * This setting does nothing on devices where the size of the L1 cache and - * shared memory are fixed. - * - * Launching a kernel with a different preference than the most recent - * preference setting may insert a device-side synchronization point. - * - * The supported cache configurations are: - * - ::CU_FUNC_CACHE_PREFER_NONE: no preference for shared memory or L1 (default) - * - ::CU_FUNC_CACHE_PREFER_SHARED: prefer larger shared memory and smaller L1 cache - * - ::CU_FUNC_CACHE_PREFER_L1: prefer larger L1 cache and smaller shared memory - * - ::CU_FUNC_CACHE_PREFER_EQUAL: prefer equal sized L1 cache and shared memory - * - * \param config - Requested cache configuration - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa ::cuCtxCreate, - * ::cuCtxDestroy, - * ::cuCtxGetApiVersion, - * ::cuCtxGetCacheConfig, - * ::cuCtxGetDevice, - * ::cuCtxGetFlags, - * ::cuCtxGetLimit, - * ::cuCtxPopCurrent, - * ::cuCtxPushCurrent, - * ::cuCtxSetLimit, - * ::cuCtxSynchronize, - * ::cuFuncSetCacheConfig, - * ::cudaDeviceSetCacheConfig - */ -CUresult CUDAAPI cuCtxSetCacheConfig(CUfunc_cache config); - -/** - * \brief Returns the current shared memory configuration for the current context. - * - * This function will return in \p pConfig the current size of shared memory banks - * in the current context. On devices with configurable shared memory banks, - * ::cuCtxSetSharedMemConfig can be used to change this setting, so that all - * subsequent kernel launches will by default use the new bank size. 
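A minimal sketch of that set-then-query flow, assuming a context is current on a device with configurable shared memory banks:

\code
#include <cuda.h>

/* Opt into eight-byte shared memory banks for subsequent launches, then read
 * back the configuration the driver reports. On devices with a fixed bank
 * size the query simply returns that fixed value. */
CUresult use_wide_banks(void) {
    CUresult err = cuCtxSetSharedMemConfig(CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE);
    if (err != CUDA_SUCCESS) return err;
    CUsharedconfig cfg;
    return cuCtxGetSharedMemConfig(&cfg);
}
\endcode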
When - * ::cuCtxGetSharedMemConfig is called on devices without configurable shared - * memory, it will return the fixed bank size of the hardware. - * - * The returned bank configurations can be either: - * - ::CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE: shared memory bank width is - * four bytes. - * - ::CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE: shared memory bank width will - * eight bytes. - * - * \param pConfig - returned shared memory configuration - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa ::cuCtxCreate, - * ::cuCtxDestroy, - * ::cuCtxGetApiVersion, - * ::cuCtxGetCacheConfig, - * ::cuCtxGetDevice, - * ::cuCtxGetFlags, - * ::cuCtxGetLimit, - * ::cuCtxPopCurrent, - * ::cuCtxPushCurrent, - * ::cuCtxSetLimit, - * ::cuCtxSynchronize, - * ::cuCtxGetSharedMemConfig, - * ::cuFuncSetCacheConfig, - * ::cudaDeviceGetSharedMemConfig - */ -CUresult CUDAAPI cuCtxGetSharedMemConfig(CUsharedconfig *pConfig); - -/** - * \brief Sets the shared memory configuration for the current context. - * - * On devices with configurable shared memory banks, this function will set - * the context's shared memory bank size which is used for subsequent kernel - * launches. - * - * Changed the shared memory configuration between launches may insert a device - * side synchronization point between those launches. - * - * Changing the shared memory bank size will not increase shared memory usage - * or affect occupancy of kernels, but may have major effects on performance. - * Larger bank sizes will allow for greater potential bandwidth to shared memory, - * but will change what kinds of accesses to shared memory will result in bank - * conflicts. - * - * This function will do nothing on devices with fixed shared memory bank size. - * - * The supported bank configurations are: - * - ::CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE: set bank width to the default initial - * setting (currently, four bytes). - * - ::CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE: set shared memory bank width to - * be natively four bytes. - * - ::CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE: set shared memory bank width to - * be natively eight bytes. - * - * \param config - requested shared memory configuration - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa ::cuCtxCreate, - * ::cuCtxDestroy, - * ::cuCtxGetApiVersion, - * ::cuCtxGetCacheConfig, - * ::cuCtxGetDevice, - * ::cuCtxGetFlags, - * ::cuCtxGetLimit, - * ::cuCtxPopCurrent, - * ::cuCtxPushCurrent, - * ::cuCtxSetLimit, - * ::cuCtxSynchronize, - * ::cuCtxGetSharedMemConfig, - * ::cuFuncSetCacheConfig, - * ::cudaDeviceSetSharedMemConfig - */ -CUresult CUDAAPI cuCtxSetSharedMemConfig(CUsharedconfig config); - -/** - * \brief Gets the context's API version. - * - * Returns a version number in \p version corresponding to the capabilities of - * the context (e.g. 3010 or 3020), which library developers can use to direct - * callers to a specific API version. If \p ctx is NULL, returns the API version - * used to create the currently bound context. - * - * Note that new API versions are only introduced when context capabilities are - * changed that break binary compatibility, so the API version and driver version - * may be different. 
For example, it is valid for the API version to be 3020 while - * the driver version is 4020. - * - * \param ctx - Context to check - * \param version - Pointer to version - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_UNKNOWN - * \notefnerr - * - * \sa ::cuCtxCreate, - * ::cuCtxDestroy, - * ::cuCtxGetDevice, - * ::cuCtxGetFlags, - * ::cuCtxGetLimit, - * ::cuCtxPopCurrent, - * ::cuCtxPushCurrent, - * ::cuCtxSetCacheConfig, - * ::cuCtxSetLimit, - * ::cuCtxSynchronize - */ -CUresult CUDAAPI cuCtxGetApiVersion(CUcontext ctx, unsigned int *version); - -/** - * \brief Returns numerical values that correspond to the least and - * greatest stream priorities. - * - * Returns in \p *leastPriority and \p *greatestPriority the numerical values that correspond - * to the least and greatest stream priorities respectively. Stream priorities - * follow a convention where lower numbers imply greater priorities. The range of - * meaningful stream priorities is given by [\p *greatestPriority, \p *leastPriority]. - * If the user attempts to create a stream with a priority value that is - * outside the meaningful range as specified by this API, the priority is - * automatically clamped down or up to either \p *leastPriority or \p *greatestPriority - * respectively. See ::cuStreamCreateWithPriority for details on creating a - * priority stream. - * A NULL may be passed in for \p *leastPriority or \p *greatestPriority if the value - * is not desired. - * - * This function will return '0' in both \p *leastPriority and \p *greatestPriority if - * the current context's device does not support stream priorities - * (see ::cuDeviceGetAttribute). - * - * \param leastPriority - Pointer to an int in which the numerical value for least - * stream priority is returned - * \param greatestPriority - Pointer to an int in which the numerical value for greatest - * stream priority is returned - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * \notefnerr - * - * \sa ::cuStreamCreateWithPriority, - * ::cuStreamGetPriority, - * ::cuCtxGetDevice, - * ::cuCtxGetFlags, - * ::cuCtxSetLimit, - * ::cuCtxSynchronize, - * ::cudaDeviceGetStreamPriorityRange - */ -CUresult CUDAAPI cuCtxGetStreamPriorityRange(int *leastPriority, int *greatestPriority); - -/** - * \brief Resets all persisting lines in cache to normal status. - * - * ::cuCtxResetPersistingL2Cache Resets all persisting lines in cache to normal - * status. Takes effect on function return. - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_NOT_SUPPORTED - * \notefnerr - * - * \sa - * ::CUaccessPolicyWindow - */ -CUresult CUDAAPI cuCtxResetPersistingL2Cache(void); - -/** - * \brief Returns the execution affinity setting for the current context. - * - * Returns in \p *pExecAffinity the current value of \p type. The supported - * ::CUexecAffinityType values are: - * - ::CU_EXEC_AFFINITY_TYPE_SM_COUNT: number of SMs the context is limited to use. 
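A minimal sketch of the stream-priority query documented above paired with ::cuStreamCreateWithPriority, assuming a context is current; on devices without stream priorities both bounds come back as 0 and the created stream simply uses the default priority:

\code
#include <cuda.h>

/* Query the meaningful priority range and create a stream at the greatest
 * (numerically lowest) priority. */
CUresult create_high_priority_stream(CUstream *out) {
    int least = 0, greatest = 0;
    CUresult err = cuCtxGetStreamPriorityRange(&least, &greatest);
    if (err != CUDA_SUCCESS) return err;
    return cuStreamCreateWithPriority(out, CU_STREAM_NON_BLOCKING, greatest);
}
\endcode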
- * - * \param type - Execution affinity type to query - * \param pExecAffinity - Returned execution affinity - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_UNSUPPORTED_EXEC_AFFINITY - * \notefnerr - * - * \sa - * ::CUexecAffinityParam - */ -CUresult CUDAAPI cuCtxGetExecAffinity(CUexecAffinityParam *pExecAffinity, CUexecAffinityType type); - - -/** @} */ /* END CUDA_CTX */ - -/** - * \defgroup CUDA_CTX_DEPRECATED Context Management [DEPRECATED] - * - * ___MANBRIEF___ deprecated context management functions of the low-level CUDA - * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the deprecated context management functions of the low-level - * CUDA driver application programming interface. - * - * @{ - */ - -/** - * \brief Increment a context's usage-count - * - * \deprecated - * - * Note that this function is deprecated and should not be used. - * - * Increments the usage count of the context and passes back a context handle - * in \p *pctx that must be passed to ::cuCtxDetach() when the application is - * done with the context. ::cuCtxAttach() fails if there is no context current - * to the thread. - * - * Currently, the \p flags parameter must be 0. - * - * \param pctx - Returned context handle of the current context - * \param flags - Context attach flags (must be 0) - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa ::cuCtxCreate, - * ::cuCtxDestroy, - * ::cuCtxDetach, - * ::cuCtxGetApiVersion, - * ::cuCtxGetCacheConfig, - * ::cuCtxGetDevice, - * ::cuCtxGetFlags, - * ::cuCtxGetLimit, - * ::cuCtxPopCurrent, - * ::cuCtxPushCurrent, - * ::cuCtxSetCacheConfig, - * ::cuCtxSetLimit, - * ::cuCtxSynchronize - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuCtxAttach(CUcontext *pctx, unsigned int flags); - -/** - * \brief Decrement a context's usage-count - * - * \deprecated - * - * Note that this function is deprecated and should not be used. - * - * Decrements the usage count of the context \p ctx, and destroys the context - * if the usage count goes to 0. The context must be a handle that was passed - * back by ::cuCtxCreate() or ::cuCtxAttach(), and must be current to the - * calling thread. - * - * \param ctx - Context to destroy - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT - * \notefnerr - * - * \sa ::cuCtxCreate, - * ::cuCtxDestroy, - * ::cuCtxGetApiVersion, - * ::cuCtxGetCacheConfig, - * ::cuCtxGetDevice, - * ::cuCtxGetFlags, - * ::cuCtxGetLimit, - * ::cuCtxPopCurrent, - * ::cuCtxPushCurrent, - * ::cuCtxSetCacheConfig, - * ::cuCtxSetLimit, - * ::cuCtxSynchronize - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuCtxDetach(CUcontext ctx); - -/** @} */ /* END CUDA_CTX_DEPRECATED */ - - -/** - * \defgroup CUDA_MODULE Module Management - * - * ___MANBRIEF___ module management functions of the low-level CUDA driver API - * (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the module management functions of the low-level CUDA - * driver application programming interface. - * - * @{ - */ - -/** - * \brief Loads a compute module - * - * Takes a filename \p fname and loads the corresponding module \p module into - * the current context. 
The CUDA driver API does not attempt to lazily - * allocate the resources needed by a module; if the memory for functions and - * data (constant and global) needed by the module cannot be allocated, - * ::cuModuleLoad() fails. The file should be a \e cubin file as output by - * \b nvcc, or a \e PTX file either as output by \b nvcc or handwritten, or - * a \e fatbin file as output by \b nvcc from toolchain 4.0 or later. - * - * \param module - Returned module - * \param fname - Filename of module to load - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_PTX, - * ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, - * ::CUDA_ERROR_NOT_FOUND, - * ::CUDA_ERROR_OUT_OF_MEMORY, - * ::CUDA_ERROR_FILE_NOT_FOUND, - * ::CUDA_ERROR_NO_BINARY_FOR_GPU, - * ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND, - * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, - * ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND - * \notefnerr - * - * \sa ::cuModuleGetFunction, - * ::cuModuleGetGlobal, - * ::cuModuleGetTexRef, - * ::cuModuleLoadData, - * ::cuModuleLoadDataEx, - * ::cuModuleLoadFatBinary, - * ::cuModuleUnload - */ -CUresult CUDAAPI cuModuleLoad(CUmodule *module, const char *fname); - -/** - * \brief Load a module's data - * - * Takes a pointer \p image and loads the corresponding module \p module into - * the current context. The pointer may be obtained by mapping a \e cubin or - * \e PTX or \e fatbin file, passing a \e cubin or \e PTX or \e fatbin file - * as a NULL-terminated text string, or incorporating a \e cubin or \e fatbin - * object into the executable resources and using operating system calls such - * as Windows \c FindResource() to obtain the pointer. - * - * \param module - Returned module - * \param image - Module data to load - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_PTX, - * ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, - * ::CUDA_ERROR_OUT_OF_MEMORY, - * ::CUDA_ERROR_NO_BINARY_FOR_GPU, - * ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND, - * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, - * ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND - * \notefnerr - * - * \sa ::cuModuleGetFunction, - * ::cuModuleGetGlobal, - * ::cuModuleGetTexRef, - * ::cuModuleLoad, - * ::cuModuleLoadDataEx, - * ::cuModuleLoadFatBinary, - * ::cuModuleUnload - */ -CUresult CUDAAPI cuModuleLoadData(CUmodule *module, const void *image); - -/** - * \brief Load a module's data with options - * - * Takes a pointer \p image and loads the corresponding module \p module into - * the current context. The pointer may be obtained by mapping a \e cubin or - * \e PTX or \e fatbin file, passing a \e cubin or \e PTX or \e fatbin file - * as a NULL-terminated text string, or incorporating a \e cubin or \e fatbin - * object into the executable resources and using operating system calls such - * as Windows \c FindResource() to obtain the pointer. Options are passed as - * an array via \p options and any corresponding parameters are passed in - * \p optionValues. The number of total options is supplied via \p numOptions. - * Any outputs will be returned via \p optionValues. 
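- *
- * For illustration, a module might be loaded with a caller-supplied JIT error
- * log (a minimal sketch; \c image is assumed to point to a NULL-terminated
- * PTX string obtained elsewhere):
- * \code
-    char errorLog[4096];
-    CUjit_option options[] = { CU_JIT_ERROR_LOG_BUFFER,
-                               CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES };
-    void *optionValues[]   = { (void *)errorLog,
-                               (void *)(size_t)sizeof(errorLog) };
-    CUmodule module;
-    if (cuModuleLoadDataEx(&module, image, 2, options, optionValues) != CUDA_SUCCESS) {
-        /* errorLog now holds the JIT error messages. */
-    }
- * \endcode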
- * - * \param module - Returned module - * \param image - Module data to load - * \param numOptions - Number of options - * \param options - Options for JIT - * \param optionValues - Option values for JIT - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_PTX, - * ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, - * ::CUDA_ERROR_OUT_OF_MEMORY, - * ::CUDA_ERROR_NO_BINARY_FOR_GPU, - * ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND, - * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, - * ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND - * \notefnerr - * - * \sa ::cuModuleGetFunction, - * ::cuModuleGetGlobal, - * ::cuModuleGetTexRef, - * ::cuModuleLoad, - * ::cuModuleLoadData, - * ::cuModuleLoadFatBinary, - * ::cuModuleUnload - */ -CUresult CUDAAPI cuModuleLoadDataEx(CUmodule *module, const void *image, unsigned int numOptions, CUjit_option *options, void **optionValues); - -/** - * \brief Load a module's data - * - * Takes a pointer \p fatCubin and loads the corresponding module \p module - * into the current context. The pointer represents a fat binary object, - * which is a collection of different \e cubin and/or \e PTX files, all - * representing the same device code, but compiled and optimized for different - * architectures. - * - * Prior to CUDA 4.0, there was no documented API for constructing and using - * fat binary objects by programmers. Starting with CUDA 4.0, fat binary - * objects can be constructed by providing the -fatbin option to \b nvcc. - * More information can be found in the \b nvcc document. - * - * \param module - Returned module - * \param fatCubin - Fat binary to load - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_PTX, - * ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, - * ::CUDA_ERROR_NOT_FOUND, - * ::CUDA_ERROR_OUT_OF_MEMORY, - * ::CUDA_ERROR_NO_BINARY_FOR_GPU, - * ::CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND, - * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED, - * ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND - * \notefnerr - * - * \sa ::cuModuleGetFunction, - * ::cuModuleGetGlobal, - * ::cuModuleGetTexRef, - * ::cuModuleLoad, - * ::cuModuleLoadData, - * ::cuModuleLoadDataEx, - * ::cuModuleUnload - */ -CUresult CUDAAPI cuModuleLoadFatBinary(CUmodule *module, const void *fatCubin); - -/** - * \brief Unloads a module - * - * Unloads a module \p hmod from the current context. - * - * \param hmod - Module to unload - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa ::cuModuleGetFunction, - * ::cuModuleGetGlobal, - * ::cuModuleGetTexRef, - * ::cuModuleLoad, - * ::cuModuleLoadData, - * ::cuModuleLoadDataEx, - * ::cuModuleLoadFatBinary - */ -CUresult CUDAAPI cuModuleUnload(CUmodule hmod); - -/** - * \brief Returns a function handle - * - * Returns in \p *hfunc the handle of the function of name \p name located in - * module \p hmod. If no function of that name exists, ::cuModuleGetFunction() - * returns ::CUDA_ERROR_NOT_FOUND. 
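- *
- * For illustration, a kernel handle might be retrieved from a loaded module
- * and launched (a minimal sketch; the file name \c kernels.cubin and the
- * kernel name \c my_kernel are placeholders, and error checking is elided):
- * \code
-    CUmodule module;
-    CUfunction kernel;
-    cuModuleLoad(&module, "kernels.cubin");               /* placeholder file name */
-    if (cuModuleGetFunction(&kernel, module, "my_kernel") == CUDA_SUCCESS) {
-        /* 1 block of 128 threads, no kernel parameters, default stream */
-        cuLaunchKernel(kernel, 1, 1, 1, 128, 1, 1, 0, NULL, NULL, NULL);
-    }
-    cuModuleUnload(module);
- * \endcode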
- * - * \param hfunc - Returned function handle - * \param hmod - Module to retrieve function from - * \param name - Name of function to retrieve - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_NOT_FOUND - * \notefnerr - * - * \sa ::cuModuleGetGlobal, - * ::cuModuleGetTexRef, - * ::cuModuleLoad, - * ::cuModuleLoadData, - * ::cuModuleLoadDataEx, - * ::cuModuleLoadFatBinary, - * ::cuModuleUnload - */ -CUresult CUDAAPI cuModuleGetFunction(CUfunction *hfunc, CUmodule hmod, const char *name); - -/** - * \brief Returns a global pointer from a module - * - * Returns in \p *dptr and \p *bytes the base pointer and size of the - * global of name \p name located in module \p hmod. If no variable of that name - * exists, ::cuModuleGetGlobal() returns ::CUDA_ERROR_NOT_FOUND. Both - * parameters \p dptr and \p bytes are optional. If one of them is - * NULL, it is ignored. - * - * \param dptr - Returned global device pointer - * \param bytes - Returned global size in bytes - * \param hmod - Module to retrieve global from - * \param name - Name of global to retrieve - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_NOT_FOUND - * \notefnerr - * - * \sa ::cuModuleGetFunction, - * ::cuModuleGetTexRef, - * ::cuModuleLoad, - * ::cuModuleLoadData, - * ::cuModuleLoadDataEx, - * ::cuModuleLoadFatBinary, - * ::cuModuleUnload, - * ::cudaGetSymbolAddress, - * ::cudaGetSymbolSize - */ -CUresult CUDAAPI cuModuleGetGlobal(CUdeviceptr *dptr, size_t *bytes, CUmodule hmod, const char *name); - -/** - * \brief Returns a handle to a texture reference - * - * Returns in \p *pTexRef the handle of the texture reference of name \p name - * in the module \p hmod. If no texture reference of that name exists, - * ::cuModuleGetTexRef() returns ::CUDA_ERROR_NOT_FOUND. This texture reference - * handle should not be destroyed, since it will be destroyed when the module - * is unloaded. - * - * \param pTexRef - Returned texture reference - * \param hmod - Module to retrieve texture reference from - * \param name - Name of texture reference to retrieve - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_NOT_FOUND - * \notefnerr - * - * \sa ::cuModuleGetFunction, - * ::cuModuleGetGlobal, - * ::cuModuleGetSurfRef, - * ::cuModuleLoad, - * ::cuModuleLoadData, - * ::cuModuleLoadDataEx, - * ::cuModuleLoadFatBinary, - * ::cuModuleUnload, - * ::cudaGetTextureReference - */ -CUresult CUDAAPI cuModuleGetTexRef(CUtexref *pTexRef, CUmodule hmod, const char *name); - -/** - * \brief Returns a handle to a surface reference - * - * Returns in \p *pSurfRef the handle of the surface reference of name \p name - * in the module \p hmod. If no surface reference of that name exists, - * ::cuModuleGetSurfRef() returns ::CUDA_ERROR_NOT_FOUND. 
- * - * \param pSurfRef - Returned surface reference - * \param hmod - Module to retrieve surface reference from - * \param name - Name of surface reference to retrieve - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_NOT_FOUND - * \notefnerr - * - * \sa ::cuModuleGetFunction, - * ::cuModuleGetGlobal, - * ::cuModuleGetTexRef, - * ::cuModuleLoad, - * ::cuModuleLoadData, - * ::cuModuleLoadDataEx, - * ::cuModuleLoadFatBinary, - * ::cuModuleUnload, - * ::cudaGetSurfaceReference - */ -CUresult CUDAAPI cuModuleGetSurfRef(CUsurfref *pSurfRef, CUmodule hmod, const char *name); - -/** - * \brief Creates a pending JIT linker invocation. - * - * If the call is successful, the caller owns the returned CUlinkState, which - * should eventually be destroyed with ::cuLinkDestroy. The - * device code machine size (32 or 64 bit) will match the calling application. - * - * Both linker and compiler options may be specified. Compiler options will - * be applied to inputs to this linker action which must be compiled from PTX. - * The options ::CU_JIT_WALL_TIME, - * ::CU_JIT_INFO_LOG_BUFFER_SIZE_BYTES, and ::CU_JIT_ERROR_LOG_BUFFER_SIZE_BYTES - * will accumulate data until the CUlinkState is destroyed. - * - * \p optionValues must remain valid for the life of the CUlinkState if output - * options are used. No other references to inputs are maintained after this - * call returns. - * - * \param numOptions Size of options arrays - * \param options Array of linker and compiler options - * \param optionValues Array of option values, each cast to void * - * \param stateOut On success, this will contain a CUlinkState to specify - * and complete this action - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_OUT_OF_MEMORY, - * ::CUDA_ERROR_JIT_COMPILER_NOT_FOUND - * \notefnerr - * - * \sa ::cuLinkAddData, - * ::cuLinkAddFile, - * ::cuLinkComplete, - * ::cuLinkDestroy - */ -CUresult CUDAAPI -cuLinkCreate(unsigned int numOptions, CUjit_option *options, void **optionValues, CUlinkState *stateOut); - -/** - * \brief Add an input to a pending linker invocation - * - * Ownership of \p data is retained by the caller. No reference is retained to any - * inputs after this call returns. - * - * This method accepts only compiler options, which are used if the data must - * be compiled from PTX, and does not accept any of - * ::CU_JIT_WALL_TIME, ::CU_JIT_INFO_LOG_BUFFER, ::CU_JIT_ERROR_LOG_BUFFER, - * ::CU_JIT_TARGET_FROM_CUCONTEXT, or ::CU_JIT_TARGET. - * - * \param state A pending linker action. - * \param type The type of the input data. - * \param data The input data. PTX must be NULL-terminated. - * \param size The length of the input data. - * \param name An optional name for this input in log messages. - * \param numOptions Size of options. - * \param options Options to be applied only for this input (overrides options from ::cuLinkCreate). - * \param optionValues Array of option values, each cast to void *. 
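- *
- * For illustration, a typical linker invocation compiles a PTX file to a
- * cubin and loads it as a module (a minimal sketch; the path \c kernels.ptx
- * is a placeholder and error checking is elided):
- * \code
-    CUlinkState linkState;
-    void *cubin;
-    size_t cubinSize;
-    CUmodule module;
-    cuLinkCreate(0, NULL, NULL, &linkState);
-    cuLinkAddFile(linkState, CU_JIT_INPUT_PTX, "kernels.ptx", 0, NULL, NULL);
-    cuLinkComplete(linkState, &cubin, &cubinSize);
-    cuModuleLoadData(&module, cubin);   /* load before destroying linkState */
-    cuLinkDestroy(linkState);
- * \endcode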
- * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_IMAGE, - * ::CUDA_ERROR_INVALID_PTX, - * ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, - * ::CUDA_ERROR_OUT_OF_MEMORY, - * ::CUDA_ERROR_NO_BINARY_FOR_GPU - * - * \sa ::cuLinkCreate, - * ::cuLinkAddFile, - * ::cuLinkComplete, - * ::cuLinkDestroy - */ -CUresult CUDAAPI -cuLinkAddData(CUlinkState state, CUjitInputType type, void *data, size_t size, const char *name, - unsigned int numOptions, CUjit_option *options, void **optionValues); - -/** - * \brief Add a file input to a pending linker invocation - * - * No reference is retained to any inputs after this call returns. - * - * This method accepts only compiler options, which are used if the input - * must be compiled from PTX, and does not accept any of - * ::CU_JIT_WALL_TIME, ::CU_JIT_INFO_LOG_BUFFER, ::CU_JIT_ERROR_LOG_BUFFER, - * ::CU_JIT_TARGET_FROM_CUCONTEXT, or ::CU_JIT_TARGET. - * - * This method is equivalent to invoking ::cuLinkAddData on the contents - * of the file. - * - * \param state A pending linker action - * \param type The type of the input data - * \param path Path to the input file - * \param numOptions Size of options - * \param options Options to be applied only for this input (overrides options from ::cuLinkCreate) - * \param optionValues Array of option values, each cast to void * - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_FILE_NOT_FOUND - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_IMAGE, - * ::CUDA_ERROR_INVALID_PTX, - * ::CUDA_ERROR_UNSUPPORTED_PTX_VERSION, - * ::CUDA_ERROR_OUT_OF_MEMORY, - * ::CUDA_ERROR_NO_BINARY_FOR_GPU - * - * \sa ::cuLinkCreate, - * ::cuLinkAddData, - * ::cuLinkComplete, - * ::cuLinkDestroy - */ -CUresult CUDAAPI -cuLinkAddFile(CUlinkState state, CUjitInputType type, const char *path, - unsigned int numOptions, CUjit_option *options, void **optionValues); - -/** - * \brief Complete a pending linker invocation - * - * Completes the pending linker action and returns the cubin image for the linked - * device code, which can be used with ::cuModuleLoadData. The cubin is owned by - * \p state, so it should be loaded before \p state is destroyed via ::cuLinkDestroy. - * This call does not destroy \p state. - * - * \param state A pending linker invocation - * \param cubinOut On success, this will point to the output image - * \param sizeOut Optional parameter to receive the size of the generated image - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_OUT_OF_MEMORY - * - * \sa ::cuLinkCreate, - * ::cuLinkAddData, - * ::cuLinkAddFile, - * ::cuLinkDestroy, - * ::cuModuleLoadData - */ -CUresult CUDAAPI -cuLinkComplete(CUlinkState state, void **cubinOut, size_t *sizeOut); - -/** - * \brief Destroys state for a JIT linker invocation. - * - * \param state State object for the linker invocation - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_HANDLE - * - * \sa ::cuLinkCreate - */ -CUresult CUDAAPI -cuLinkDestroy(CUlinkState state); - -/** @} */ /* END CUDA_MODULE */ - - -/** - * \defgroup CUDA_MEM Memory Management - * - * ___MANBRIEF___ memory management functions of the low-level CUDA driver API - * (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the memory management functions of the low-level CUDA - * driver application programming interface. 
- * - * @{ - */ - -/** - * \brief Gets free and total memory - * - * Returns in \p *free and \p *total respectively, the free and total amount of - * memory available for allocation by the CUDA context, in bytes. - * - * \param free - Returned free memory in bytes - * \param total - Returned total memory in bytes - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cudaMemGetInfo - */ -CUresult CUDAAPI cuMemGetInfo(size_t *free, size_t *total); - -/** - * \brief Allocates device memory - * - * Allocates \p bytesize bytes of linear memory on the device and returns in - * \p *dptr a pointer to the allocated memory. The allocated memory is suitably - * aligned for any kind of variable. The memory is not cleared. If \p bytesize - * is 0, ::cuMemAlloc() returns ::CUDA_ERROR_INVALID_VALUE. - * - * \param dptr - Returned device pointer - * \param bytesize - Requested allocation size in bytes - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_OUT_OF_MEMORY - * \notefnerr - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cudaMalloc - */ -CUresult CUDAAPI cuMemAlloc(CUdeviceptr *dptr, size_t bytesize); - -/** - * \brief Allocates pitched device memory - * - * Allocates at least \p WidthInBytes * \p Height bytes of linear memory on - * the device and returns in \p *dptr a pointer to the allocated memory. The - * function may pad the allocation to ensure that corresponding pointers in - * any given row will continue to meet the alignment requirements for - * coalescing as the address is updated from row to row. \p ElementSizeBytes - * specifies the size of the largest reads and writes that will be performed - * on the memory range. \p ElementSizeBytes may be 4, 8 or 16 (since coalesced - * memory transactions are not possible on other data sizes). 
If - * \p ElementSizeBytes is smaller than the actual read/write size of a kernel, - * the kernel will run correctly, but possibly at reduced speed. The pitch - * returned in \p *pPitch by ::cuMemAllocPitch() is the width in bytes of the - * allocation. The intended usage of pitch is as a separate parameter of the - * allocation, used to compute addresses within the 2D array. Given the row - * and column of an array element of type \b T, the address is computed as: - * \code - T* pElement = (T*)((char*)BaseAddress + Row * Pitch) + Column; - * \endcode - * - * The pitch returned by ::cuMemAllocPitch() is guaranteed to work with - * ::cuMemcpy2D() under all circumstances. For allocations of 2D arrays, it is - * recommended that programmers consider performing pitch allocations using - * ::cuMemAllocPitch(). Due to alignment restrictions in the hardware, this is - * especially true if the application will be performing 2D memory copies - * between different regions of device memory (whether linear memory or CUDA - * arrays). - * - * The byte alignment of the pitch returned by ::cuMemAllocPitch() is guaranteed - * to match or exceed the alignment requirement for texture binding with - * ::cuTexRefSetAddress2D(). - * - * \param dptr - Returned device pointer - * \param pPitch - Returned pitch of allocation in bytes - * \param WidthInBytes - Requested allocation width in bytes - * \param Height - Requested allocation height in rows - * \param ElementSizeBytes - Size of largest reads/writes for range - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_OUT_OF_MEMORY - * \notefnerr - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cudaMallocPitch - */ -CUresult CUDAAPI cuMemAllocPitch(CUdeviceptr *dptr, size_t *pPitch, size_t WidthInBytes, size_t Height, unsigned int ElementSizeBytes); - -/** - * \brief Frees device memory - * - * Frees the memory space pointed to by \p dptr, which must have been returned - * by a previous call to ::cuMemAlloc() or ::cuMemAllocPitch(). 
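- *
- * For illustration, a basic allocate/copy/free sequence might look as follows
- * (a minimal sketch; error checking is elided):
- * \code
-    float hostData[256] = {0};
-    CUdeviceptr devPtr;
-    size_t freeMem, totalMem;
-    cuMemGetInfo(&freeMem, &totalMem);                /* optional capacity check */
-    cuMemAlloc(&devPtr, sizeof(hostData));
-    cuMemcpyHtoD(devPtr, hostData, sizeof(hostData));
-    cuMemFree(devPtr);
- * \endcode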
- * - * \param dptr - Pointer to memory to free - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cudaFree - */ -CUresult CUDAAPI cuMemFree(CUdeviceptr dptr); - -/** - * \brief Get information on memory allocations - * - * Returns the base address in \p *pbase and size in \p *psize of the - * allocation by ::cuMemAlloc() or ::cuMemAllocPitch() that contains the input - * pointer \p dptr. Both parameters \p pbase and \p psize are optional. If one - * of them is NULL, it is ignored. - * - * \param pbase - Returned base address - * \param psize - Returned size of device memory allocation - * \param dptr - Device pointer to query - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_NOT_FOUND, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32 - */ -CUresult CUDAAPI cuMemGetAddressRange(CUdeviceptr *pbase, size_t *psize, CUdeviceptr dptr); - -/** - * \brief Allocates page-locked host memory - * - * Allocates \p bytesize bytes of host memory that is page-locked and - * accessible to the device. The driver tracks the virtual memory ranges - * allocated with this function and automatically accelerates calls to - * functions such as ::cuMemcpy(). Since the memory can be accessed directly by - * the device, it can be read or written with much higher bandwidth than - * pageable memory obtained with functions such as ::malloc(). Allocating - * excessive amounts of memory with ::cuMemAllocHost() may degrade system - * performance, since it reduces the amount of memory available to the system - * for paging. As a result, this function is best used sparingly to allocate - * staging areas for data exchange between host and device. 
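- *
- * For illustration, a page-locked staging buffer might be used to feed an
- * asynchronous copy (a minimal sketch; \c devPtr and \c stream are assumed to
- * have been created elsewhere and error checking is elided):
- * \code
-    void *staging;
-    cuMemAllocHost(&staging, 1 << 20);                    /* 1 MiB of page-locked memory */
-    /* ... fill staging on the host ... */
-    cuMemcpyHtoDAsync(devPtr, staging, 1 << 20, stream);  /* devPtr/stream created elsewhere */
-    cuStreamSynchronize(stream);                          /* wait before reusing or freeing */
-    cuMemFreeHost(staging);
- * \endcode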
- * - * Note all host memory allocated using ::cuMemHostAlloc() will automatically - * be immediately accessible to all contexts on all devices which support unified - * addressing (as may be queried using ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING). - * The device pointer that may be used to access this host memory from those - * contexts is always equal to the returned host pointer \p *pp. - * See \ref CUDA_UNIFIED for additional details. - * - * \param pp - Returned host pointer to page-locked memory - * \param bytesize - Requested allocation size in bytes - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_OUT_OF_MEMORY - * \notefnerr - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cudaMallocHost - */ -CUresult CUDAAPI cuMemAllocHost(void **pp, size_t bytesize); - -/** - * \brief Frees page-locked host memory - * - * Frees the memory space pointed to by \p p, which must have been returned by - * a previous call to ::cuMemAllocHost(). - * - * \param p - Pointer to memory to free - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cudaFreeHost - */ -CUresult CUDAAPI cuMemFreeHost(void *p); - -/** - * \brief Allocates page-locked host memory - * - * Allocates \p bytesize bytes of host memory that is page-locked and accessible - * to the device. The driver tracks the virtual memory ranges allocated with - * this function and automatically accelerates calls to functions such as - * ::cuMemcpyHtoD(). Since the memory can be accessed directly by the device, - * it can be read or written with much higher bandwidth than pageable memory - * obtained with functions such as ::malloc(). Allocating excessive amounts of - * pinned memory may degrade system performance, since it reduces the amount - * of memory available to the system for paging. As a result, this function is - * best used sparingly to allocate staging areas for data exchange between - * host and device. 
- * - * The \p Flags parameter enables different options to be specified that - * affect the allocation, as follows. - * - * - ::CU_MEMHOSTALLOC_PORTABLE: The memory returned by this call will be - * considered as pinned memory by all CUDA contexts, not just the one that - * performed the allocation. - * - * - ::CU_MEMHOSTALLOC_DEVICEMAP: Maps the allocation into the CUDA address - * space. The device pointer to the memory may be obtained by calling - * ::cuMemHostGetDevicePointer(). - * - * - ::CU_MEMHOSTALLOC_WRITECOMBINED: Allocates the memory as write-combined - * (WC). WC memory can be transferred across the PCI Express bus more - * quickly on some system configurations, but cannot be read efficiently by - * most CPUs. WC memory is a good option for buffers that will be written by - * the CPU and read by the GPU via mapped pinned memory or host->device - * transfers. - * - * All of these flags are orthogonal to one another: a developer may allocate - * memory that is portable, mapped and/or write-combined with no restrictions. - * - * The ::CU_MEMHOSTALLOC_DEVICEMAP flag may be specified on CUDA contexts for - * devices that do not support mapped pinned memory. The failure is deferred - * to ::cuMemHostGetDevicePointer() because the memory may be mapped into - * other CUDA contexts via the ::CU_MEMHOSTALLOC_PORTABLE flag. - * - * The memory allocated by this function must be freed with ::cuMemFreeHost(). - * - * Note all host memory allocated using ::cuMemHostAlloc() will automatically - * be immediately accessible to all contexts on all devices which support unified - * addressing (as may be queried using ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING). - * Unless the flag ::CU_MEMHOSTALLOC_WRITECOMBINED is specified, the device pointer - * that may be used to access this host memory from those contexts is always equal - * to the returned host pointer \p *pp. If the flag ::CU_MEMHOSTALLOC_WRITECOMBINED - * is specified, then the function ::cuMemHostGetDevicePointer() must be used - * to query the device pointer, even if the context supports unified addressing. - * See \ref CUDA_UNIFIED for additional details. 
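- *
- * For illustration, a mapped, page-locked buffer and its device pointer might
- * be obtained as follows (a minimal sketch; error checking is elided):
- * \code
-    void *hostPtr;
-    CUdeviceptr devPtr;
-    cuMemHostAlloc(&hostPtr, 1 << 20, CU_MEMHOSTALLOC_DEVICEMAP);
-    cuMemHostGetDevicePointer(&devPtr, hostPtr, 0);   /* Flags must be 0 */
-    /* devPtr may now be passed to kernels; writes land directly in hostPtr */
-    cuMemFreeHost(hostPtr);
- * \endcode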
- * - * \param pp - Returned host pointer to page-locked memory - * \param bytesize - Requested allocation size in bytes - * \param Flags - Flags for allocation request - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_OUT_OF_MEMORY - * \notefnerr - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cudaHostAlloc - */ -CUresult CUDAAPI cuMemHostAlloc(void **pp, size_t bytesize, unsigned int Flags); - -/** - * \brief Passes back device pointer of mapped pinned memory - * - * Passes back the device pointer \p pdptr corresponding to the mapped, pinned - * host buffer \p p allocated by ::cuMemHostAlloc. - * - * ::cuMemHostGetDevicePointer() will fail if the ::CU_MEMHOSTALLOC_DEVICEMAP - * flag was not specified at the time the memory was allocated, or if the - * function is called on a GPU that does not support mapped pinned memory. - * - * For devices that have a non-zero value for the device attribute - * ::CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM, the memory - * can also be accessed from the device using the host pointer \p p. - * The device pointer returned by ::cuMemHostGetDevicePointer() may or may not - * match the original host pointer \p p and depends on the devices visible to the - * application. If all devices visible to the application have a non-zero value for the - * device attribute, the device pointer returned by ::cuMemHostGetDevicePointer() - * will match the original pointer \p p. If any device visible to the application - * has a zero value for the device attribute, the device pointer returned by - * ::cuMemHostGetDevicePointer() will not match the original host pointer \p p, - * but it will be suitable for use on all devices provided Unified Virtual Addressing - * is enabled. In such systems, it is valid to access the memory using either pointer - * on devices that have a non-zero value for the device attribute. Note however that - * such devices should access the memory using only of the two pointers and not both. - * - * \p Flags provides for future releases. For now, it must be set to 0. 
- * - * \param pdptr - Returned device pointer - * \param p - Host pointer - * \param Flags - Options (must be 0) - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cudaHostGetDevicePointer - */ -CUresult CUDAAPI cuMemHostGetDevicePointer(CUdeviceptr *pdptr, void *p, unsigned int Flags); - -/** - * \brief Passes back flags that were used for a pinned allocation - * - * Passes back the flags \p pFlags that were specified when allocating - * the pinned host buffer \p p allocated by ::cuMemHostAlloc. - * - * ::cuMemHostGetFlags() will fail if the pointer does not reside in - * an allocation performed by ::cuMemAllocHost() or ::cuMemHostAlloc(). - * - * \param pFlags - Returned flags word - * \param p - Host pointer - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa - * ::cuMemAllocHost, - * ::cuMemHostAlloc, - * ::cudaHostGetFlags - */ -CUresult CUDAAPI cuMemHostGetFlags(unsigned int *pFlags, void *p); - -/** - * \brief Allocates memory that will be automatically managed by the Unified Memory system - * - * Allocates \p bytesize bytes of managed memory on the device and returns in - * \p *dptr a pointer to the allocated memory. If the device doesn't support - * allocating managed memory, ::CUDA_ERROR_NOT_SUPPORTED is returned. Support - * for managed memory can be queried using the device attribute - * ::CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY. The allocated memory is suitably - * aligned for any kind of variable. The memory is not cleared. If \p bytesize - * is 0, ::cuMemAllocManaged returns ::CUDA_ERROR_INVALID_VALUE. The pointer - * is valid on the CPU and on all GPUs in the system that support managed memory. - * All accesses to this pointer must obey the Unified Memory programming model. - * - * \p flags specifies the default stream association for this allocation. - * \p flags must be one of ::CU_MEM_ATTACH_GLOBAL or ::CU_MEM_ATTACH_HOST. If - * ::CU_MEM_ATTACH_GLOBAL is specified, then this memory is accessible from - * any stream on any device. If ::CU_MEM_ATTACH_HOST is specified, then the - * allocation should not be accessed from devices that have a zero value for the - * device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS; an explicit call to - * ::cuStreamAttachMemAsync will be required to enable access on such devices. - * - * If the association is later changed via ::cuStreamAttachMemAsync to - * a single stream, the default association as specified during ::cuMemAllocManaged - * is restored when that stream is destroyed. 
For __managed__ variables, the - * default association is always ::CU_MEM_ATTACH_GLOBAL. Note that destroying a - * stream is an asynchronous operation, and as a result, the change to default - * association won't happen until all work in the stream has completed. - * - * Memory allocated with ::cuMemAllocManaged should be released with ::cuMemFree. - * - * Device memory oversubscription is possible for GPUs that have a non-zero value for the - * device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Managed memory on - * such GPUs may be evicted from device memory to host memory at any time by the Unified - * Memory driver in order to make room for other allocations. - * - * In a multi-GPU system where all GPUs have a non-zero value for the device attribute - * ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS, managed memory may not be populated when this - * API returns and instead may be populated on access. In such systems, managed memory can - * migrate to any processor's memory at any time. The Unified Memory driver will employ heuristics to - * maintain data locality and prevent excessive page faults to the extent possible. The application - * can also guide the driver about memory usage patterns via ::cuMemAdvise. The application - * can also explicitly migrate memory to a desired processor's memory via - * ::cuMemPrefetchAsync. - * - * In a multi-GPU system where all of the GPUs have a zero value for the device attribute - * ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS and all the GPUs have peer-to-peer support - * with each other, the physical storage for managed memory is created on the GPU which is active - * at the time ::cuMemAllocManaged is called. All other GPUs will reference the data at reduced - * bandwidth via peer mappings over the PCIe bus. The Unified Memory driver does not migrate - * memory among such GPUs. - * - * In a multi-GPU system where not all GPUs have peer-to-peer support with each other and - * where the value of the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS - * is zero for at least one of those GPUs, the location chosen for physical storage of managed - * memory is system-dependent. - * - On Linux, the location chosen will be device memory as long as the current set of active - * contexts are on devices that either have peer-to-peer support with each other or have a - * non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. - * If there is an active context on a GPU that does not have a non-zero value for that device - * attribute and it does not have peer-to-peer support with the other devices that have active - * contexts on them, then the location for physical storage will be 'zero-copy' or host memory. - * Note that this means that managed memory that is located in device memory is migrated to - * host memory if a new context is created on a GPU that doesn't have a non-zero value for - * the device attribute and does not support peer-to-peer with at least one of the other devices - * that has an active context. This in turn implies that context creation may fail if there is - * insufficient host memory to migrate all managed allocations. - * - On Windows, the physical storage is always created in 'zero-copy' or host memory. - * All GPUs will reference the data at reduced bandwidth over the PCIe bus. In these - * circumstances, use of the environment variable CUDA_VISIBLE_DEVICES is recommended to - * restrict CUDA to only use those GPUs that have peer-to-peer support. 
- * Alternatively, users can also set CUDA_MANAGED_FORCE_DEVICE_ALLOC to a - * non-zero value to force the driver to always use device memory for physical storage. - * When this environment variable is set to a non-zero value, all contexts created in - * that process on devices that support managed memory have to be peer-to-peer compatible - * with each other. Context creation will fail if a context is created on a device that - * supports managed memory and is not peer-to-peer compatible with any of the other - * managed memory supporting devices on which contexts were previously created, even if - * those contexts have been destroyed. These environment variables are described - * in the CUDA programming guide under the "CUDA environment variables" section. - * - On ARM, managed memory is not available on discrete gpu with Drive PX-2. - * - * \param dptr - Returned device pointer - * \param bytesize - Requested allocation size in bytes - * \param flags - Must be one of ::CU_MEM_ATTACH_GLOBAL or ::CU_MEM_ATTACH_HOST - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_NOT_SUPPORTED, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_OUT_OF_MEMORY - * \notefnerr - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cuDeviceGetAttribute, ::cuStreamAttachMemAsync, - * ::cudaMallocManaged - */ -CUresult CUDAAPI cuMemAllocManaged(CUdeviceptr *dptr, size_t bytesize, unsigned int flags); - -/** - * \brief Returns a handle to a compute device - * - * Returns in \p *device a device handle given a PCI bus ID string. - * - * \param dev - Returned device handle - * - * \param pciBusId - String in one of the following forms: - * [domain]:[bus]:[device].[function] - * [domain]:[bus]:[device] - * [bus]:[device].[function] - * where \p domain, \p bus, \p device, and \p function are all hexadecimal values - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_DEVICE - * \notefnerr - * - * \sa - * ::cuDeviceGet, - * ::cuDeviceGetAttribute, - * ::cuDeviceGetPCIBusId, - * ::cudaDeviceGetByPCIBusId - */ -CUresult CUDAAPI cuDeviceGetByPCIBusId(CUdevice *dev, const char *pciBusId); - -/** - * \brief Returns a PCI Bus Id string for the device - * - * Returns an ASCII string identifying the device \p dev in the NULL-terminated - * string pointed to by \p pciBusId. \p len specifies the maximum length of the - * string that may be returned. - * - * \param pciBusId - Returned identifier string for the device in the following format - * [domain]:[bus]:[device].[function] - * where \p domain, \p bus, \p device, and \p function are all hexadecimal values. - * pciBusId should be large enough to store 13 characters including the NULL-terminator. 
- * - * \param len - Maximum length of string to store in \p name - * - * \param dev - Device to get identifier string for - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_DEVICE - * \notefnerr - * - * \sa - * ::cuDeviceGet, - * ::cuDeviceGetAttribute, - * ::cuDeviceGetByPCIBusId, - * ::cudaDeviceGetPCIBusId - */ -CUresult CUDAAPI cuDeviceGetPCIBusId(char *pciBusId, int len, CUdevice dev); - -/** - * \brief Gets an interprocess handle for a previously allocated event - * - * Takes as input a previously allocated event. This event must have been - * created with the ::CU_EVENT_INTERPROCESS and ::CU_EVENT_DISABLE_TIMING - * flags set. This opaque handle may be copied into other processes and - * opened with ::cuIpcOpenEventHandle to allow efficient hardware - * synchronization between GPU work in different processes. - * - * After the event has been opened in the importing process, - * ::cuEventRecord, ::cuEventSynchronize, ::cuStreamWaitEvent and - * ::cuEventQuery may be used in either process. Performing operations - * on the imported event after the exported event has been freed - * with ::cuEventDestroy will result in undefined behavior. - * - * IPC functionality is restricted to devices with support for unified - * addressing on Linux and Windows operating systems. - * IPC functionality on Windows is restricted to GPUs in TCC mode - * - * \param pHandle - Pointer to a user allocated CUipcEventHandle - * in which to return the opaque event handle - * \param event - Event allocated with ::CU_EVENT_INTERPROCESS and - * ::CU_EVENT_DISABLE_TIMING flags. - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_OUT_OF_MEMORY, - * ::CUDA_ERROR_MAP_FAILED, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa - * ::cuEventCreate, - * ::cuEventDestroy, - * ::cuEventSynchronize, - * ::cuEventQuery, - * ::cuStreamWaitEvent, - * ::cuIpcOpenEventHandle, - * ::cuIpcGetMemHandle, - * ::cuIpcOpenMemHandle, - * ::cuIpcCloseMemHandle, - * ::cudaIpcGetEventHandle - */ -CUresult CUDAAPI cuIpcGetEventHandle(CUipcEventHandle *pHandle, CUevent event); - -/** - * \brief Opens an interprocess event handle for use in the current process - * - * Opens an interprocess event handle exported from another process with - * ::cuIpcGetEventHandle. This function returns a ::CUevent that behaves like - * a locally created event with the ::CU_EVENT_DISABLE_TIMING flag specified. - * This event must be freed with ::cuEventDestroy. - * - * Performing operations on the imported event after the exported event has - * been freed with ::cuEventDestroy will result in undefined behavior. - * - * IPC functionality is restricted to devices with support for unified - * addressing on Linux and Windows operating systems. 
- * IPC functionality on Windows is restricted to GPUs in TCC mode - * - * \param phEvent - Returns the imported event - * \param handle - Interprocess handle to open - * - * \returns - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_MAP_FAILED, - * ::CUDA_ERROR_PEER_ACCESS_UNSUPPORTED, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa - * ::cuEventCreate, - * ::cuEventDestroy, - * ::cuEventSynchronize, - * ::cuEventQuery, - * ::cuStreamWaitEvent, - * ::cuIpcGetEventHandle, - * ::cuIpcGetMemHandle, - * ::cuIpcOpenMemHandle, - * ::cuIpcCloseMemHandle, - * ::cudaIpcOpenEventHandle - */ -CUresult CUDAAPI cuIpcOpenEventHandle(CUevent *phEvent, CUipcEventHandle handle); - -/** - * \brief Gets an interprocess memory handle for an existing device memory - * allocation - * - * Takes a pointer to the base of an existing device memory allocation created - * with ::cuMemAlloc and exports it for use in another process. This is a - * lightweight operation and may be called multiple times on an allocation - * without adverse effects. - * - * If a region of memory is freed with ::cuMemFree and a subsequent call - * to ::cuMemAlloc returns memory with the same device address, - * ::cuIpcGetMemHandle will return a unique handle for the - * new memory. - * - * IPC functionality is restricted to devices with support for unified - * addressing on Linux and Windows operating systems. - * IPC functionality on Windows is restricted to GPUs in TCC mode - * - * \param pHandle - Pointer to user allocated ::CUipcMemHandle to return - * the handle in. - * \param dptr - Base pointer to previously allocated device memory - * - * \returns - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_OUT_OF_MEMORY, - * ::CUDA_ERROR_MAP_FAILED, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa - * ::cuMemAlloc, - * ::cuMemFree, - * ::cuIpcGetEventHandle, - * ::cuIpcOpenEventHandle, - * ::cuIpcOpenMemHandle, - * ::cuIpcCloseMemHandle, - * ::cudaIpcGetMemHandle - */ -CUresult CUDAAPI cuIpcGetMemHandle(CUipcMemHandle *pHandle, CUdeviceptr dptr); - -/** - * \brief Opens an interprocess memory handle exported from another process - * and returns a device pointer usable in the local process. - * - * Maps memory exported from another process with ::cuIpcGetMemHandle into - * the current device address space. For contexts on different devices - * ::cuIpcOpenMemHandle can attempt to enable peer access between the - * devices as if the user called ::cuCtxEnablePeerAccess. This behavior is - * controlled by the ::CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS flag. - * ::cuDeviceCanAccessPeer can determine if a mapping is possible. - * - * Contexts that may open ::CUipcMemHandles are restricted in the following way. - * ::CUipcMemHandles from each ::CUdevice in a given process may only be opened - * by one ::CUcontext per ::CUdevice per other process. - * - * If the memory handle has already been opened by the current context, the - * reference count on the handle is incremented by 1 and the existing device pointer - * is returned. - * - * Memory returned from ::cuIpcOpenMemHandle must be freed with - * ::cuIpcCloseMemHandle. - * - * Calling ::cuMemFree on an exported memory region before calling - * ::cuIpcCloseMemHandle in the importing context will result in undefined - * behavior. - * - * IPC functionality is restricted to devices with support for unified - * addressing on Linux and Windows operating systems. 
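- *
- * For illustration, an allocation might be exported from one process and
- * opened in another as follows (a minimal sketch; transporting \c handle
- * between the processes, e.g. over a pipe or socket, is application-defined
- * and omitted, and error checking is elided):
- * \code
-    /* Exporting process: */
-    CUdeviceptr devPtr;
-    CUipcMemHandle handle;
-    cuMemAlloc(&devPtr, 1 << 20);
-    cuIpcGetMemHandle(&handle, devPtr);
-    /* ... send 'handle' to the importing process ... */
-    /* Importing process: */
-    CUdeviceptr mapped;
-    cuIpcOpenMemHandle(&mapped, handle, CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS);
-    /* ... use 'mapped' ... */
-    cuIpcCloseMemHandle(mapped);
- * \endcode
- *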
- * IPC functionality on Windows is restricted to GPUs in TCC mode - * - * \param pdptr - Returned device pointer - * \param handle - ::CUipcMemHandle to open - * \param Flags - Flags for this operation. Must be specified as ::CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS - * - * \returns - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_MAP_FAILED, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_TOO_MANY_PEERS, - * ::CUDA_ERROR_INVALID_VALUE - * - * \note No guarantees are made about the address returned in \p *pdptr. - * In particular, multiple processes may not receive the same address for the same \p handle. - * - * \sa - * ::cuMemAlloc, - * ::cuMemFree, - * ::cuIpcGetEventHandle, - * ::cuIpcOpenEventHandle, - * ::cuIpcGetMemHandle, - * ::cuIpcCloseMemHandle, - * ::cuCtxEnablePeerAccess, - * ::cuDeviceCanAccessPeer, - * ::cudaIpcOpenMemHandle - */ -CUresult CUDAAPI cuIpcOpenMemHandle(CUdeviceptr *pdptr, CUipcMemHandle handle, unsigned int Flags); - -/** - * \brief Attempts to close memory mapped with ::cuIpcOpenMemHandle - * - * Decrements the reference count of the memory returned by ::cuIpcOpenMemHandle by 1. - * When the reference count reaches 0, this API unmaps the memory. The original allocation - * in the exporting process as well as imported mappings in other processes - * will be unaffected. - * - * Any resources used to enable peer access will be freed if this is the - * last mapping using them. - * - * IPC functionality is restricted to devices with support for unified - * addressing on Linux and Windows operating systems. - * IPC functionality on Windows is restricted to GPUs in TCC mode - * - * \param dptr - Device pointer returned by ::cuIpcOpenMemHandle - * - * \returns - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_MAP_FAILED, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_INVALID_VALUE - * \sa - * ::cuMemAlloc, - * ::cuMemFree, - * ::cuIpcGetEventHandle, - * ::cuIpcOpenEventHandle, - * ::cuIpcGetMemHandle, - * ::cuIpcOpenMemHandle, - * ::cudaIpcCloseMemHandle - */ -CUresult CUDAAPI cuIpcCloseMemHandle(CUdeviceptr dptr); - -/** - * \brief Registers an existing host memory range for use by CUDA - * - * Page-locks the memory range specified by \p p and \p bytesize and maps it - * for the device(s) as specified by \p Flags. This memory range also is added - * to the same tracking mechanism as ::cuMemHostAlloc to automatically accelerate - * calls to functions such as ::cuMemcpyHtoD(). Since the memory can be accessed - * directly by the device, it can be read or written with much higher bandwidth - * than pageable memory that has not been registered. Page-locking excessive - * amounts of memory may degrade system performance, since it reduces the amount - * of memory available to the system for paging. As a result, this function is - * best used sparingly to register staging areas for data exchange between - * host and device. - * - * This function has limited support on Mac OS X. OS 10.7 or higher is required. - * - * The \p Flags parameter enables different options to be specified that - * affect the allocation, as follows. - * - * - ::CU_MEMHOSTREGISTER_PORTABLE: The memory returned by this call will be - * considered as pinned memory by all CUDA contexts, not just the one that - * performed the allocation. - * - * - ::CU_MEMHOSTREGISTER_DEVICEMAP: Maps the allocation into the CUDA address - * space. The device pointer to the memory may be obtained by calling - * ::cuMemHostGetDevicePointer(). 
- * - * - ::CU_MEMHOSTREGISTER_IOMEMORY: The pointer is treated as pointing to some - * I/O memory space, e.g. the PCI Express resource of a 3rd party device. - * - * - ::CU_MEMHOSTREGISTER_READ_ONLY: The pointer is treated as pointing to memory - * that is considered read-only by the device. On platforms without - * CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, this flag is - * required in order to register memory mapped to the CPU as read-only. Support - * for the use of this flag can be queried from the device attribute - * CU_DEVICE_ATTRIBUTE_READ_ONLY_HOST_REGISTER_SUPPORTED. Using this flag with - * a current context associated with a device that does not have this attribute - * set will cause ::cuMemHostRegister to error with CUDA_ERROR_NOT_SUPPORTED. - * - * All of these flags are orthogonal to one another: a developer may page-lock - * memory that is portable or mapped with no restrictions. - * - * The ::CU_MEMHOSTREGISTER_DEVICEMAP flag may be specified on CUDA contexts for - * devices that do not support mapped pinned memory. The failure is deferred - * to ::cuMemHostGetDevicePointer() because the memory may be mapped into - * other CUDA contexts via the ::CU_MEMHOSTREGISTER_PORTABLE flag. - * - * For devices that have a non-zero value for the device attribute - * ::CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM, the memory - * can also be accessed from the device using the host pointer \p p. - * The device pointer returned by ::cuMemHostGetDevicePointer() may or may not - * match the original host pointer \p ptr and depends on the devices visible to the - * application. If all devices visible to the application have a non-zero value for the - * device attribute, the device pointer returned by ::cuMemHostGetDevicePointer() - * will match the original pointer \p ptr. If any device visible to the application - * has a zero value for the device attribute, the device pointer returned by - * ::cuMemHostGetDevicePointer() will not match the original host pointer \p ptr, - * but it will be suitable for use on all devices provided Unified Virtual Addressing - * is enabled. In such systems, it is valid to access the memory using either pointer - * on devices that have a non-zero value for the device attribute. Note however that - * such devices should access the memory using only of the two pointers and not both. - * - * The memory page-locked by this function must be unregistered with - * ::cuMemHostUnregister(). - * - * \param p - Host pointer to memory to page-lock - * \param bytesize - Size in bytes of the address range to page-lock - * \param Flags - Flags for allocation request - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_OUT_OF_MEMORY, - * ::CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED, - * ::CUDA_ERROR_NOT_PERMITTED, - * ::CUDA_ERROR_NOT_SUPPORTED - * \notefnerr - * - * \sa - * ::cuMemHostUnregister, - * ::cuMemHostGetFlags, - * ::cuMemHostGetDevicePointer, - * ::cudaHostRegister - */ -CUresult CUDAAPI cuMemHostRegister(void *p, size_t bytesize, unsigned int Flags); - -/** - * \brief Unregisters a memory range that was registered with cuMemHostRegister. - * - * Unmaps the memory range whose base address is specified by \p p, and makes - * it pageable again. - * - * The base address must be the same one specified to ::cuMemHostRegister(). 
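A minimal sketch of cuMemHostRegister/cuMemHostUnregister around an ordinary malloc'ed staging buffer, assuming a current context; the choice of CU_MEMHOSTREGISTER_PORTABLE is illustrative only.

#include <cuda.h>
#include <stdlib.h>

int copy_via_registered_buffer(CUdeviceptr dst, size_t bytes) {
    void *staging = malloc(bytes);
    if (!staging) return -1;

    /* Page-lock the range; PORTABLE pins it for every CUDA context. */
    cuMemHostRegister(staging, bytes, CU_MEMHOSTREGISTER_PORTABLE);

    /* ... fill `staging` with the data to upload ... */
    cuMemcpyHtoD(dst, staging, bytes);    /* takes the pinned fast path */

    cuMemHostUnregister(staging);         /* must be the same base address */
    free(staging);
    return 0;
}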
- * - * \param p - Host pointer to memory to unregister - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_OUT_OF_MEMORY, - * ::CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED, - * \notefnerr - * - * \sa - * ::cuMemHostRegister, - * ::cudaHostUnregister - */ -CUresult CUDAAPI cuMemHostUnregister(void *p); - -/** - * \brief Copies memory - * - * Copies data between two pointers. - * \p dst and \p src are base pointers of the destination and source, respectively. - * \p ByteCount specifies the number of bytes to copy. - * Note that this function infers the type of the transfer (host to host, host to - * device, device to device, or device to host) from the pointer values. This - * function is only allowed in contexts which support unified addressing. - * - * \param dst - Destination unified virtual address space pointer - * \param src - Source unified virtual address space pointer - * \param ByteCount - Size of memory copy in bytes - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_sync - * \note_memcpy - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cudaMemcpy, - * ::cudaMemcpyToSymbol, - * ::cudaMemcpyFromSymbol - */ -CUresult CUDAAPI cuMemcpy(CUdeviceptr dst, CUdeviceptr src, size_t ByteCount); - -/** - * \brief Copies device memory between two contexts - * - * Copies from device memory in one context to device memory in another - * context. \p dstDevice is the base device pointer of the destination memory - * and \p dstContext is the destination context. \p srcDevice is the base - * device pointer of the source memory and \p srcContext is the source pointer. - * \p ByteCount specifies the number of bytes to copy. - * - * \param dstDevice - Destination device pointer - * \param dstContext - Destination context - * \param srcDevice - Source device pointer - * \param srcContext - Source context - * \param ByteCount - Size of memory copy in bytes - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_sync - * - * \sa ::cuMemcpyDtoD, ::cuMemcpy3DPeer, ::cuMemcpyDtoDAsync, ::cuMemcpyPeerAsync, - * ::cuMemcpy3DPeerAsync, - * ::cudaMemcpyPeer - */ -CUresult CUDAAPI cuMemcpyPeer(CUdeviceptr dstDevice, CUcontext dstContext, CUdeviceptr srcDevice, CUcontext srcContext, size_t ByteCount); - -/** - * \brief Copies memory from Host to Device - * - * Copies from host memory to device memory. \p dstDevice and \p srcHost are - * the base addresses of the destination and source, respectively. \p ByteCount - * specifies the number of bytes to copy. 
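A minimal sketch of the unified-addressing cuMemcpy documented above, assuming a 64-bit platform where pointers returned by cuMemAllocHost live in the unified address space and the current context supports unified addressing.

#include <cuda.h>
#include <stddef.h>
#include <stdint.h>

void uva_copies(size_t bytes) {
    void *pinned;
    CUdeviceptr a, b;

    cuMemAllocHost(&pinned, bytes);       /* pinned, UVA-visible host memory */
    cuMemAlloc(&a, bytes);
    cuMemAlloc(&b, bytes);

    /* The transfer direction is inferred from the pointer values. */
    cuMemcpy(a, (CUdeviceptr)(uintptr_t)pinned, bytes);   /* host   -> device */
    cuMemcpy(b, a, bytes);                                /* device -> device */
    cuMemcpy((CUdeviceptr)(uintptr_t)pinned, b, bytes);   /* device -> host   */

    cuMemFree(b);
    cuMemFree(a);
    cuMemFreeHost(pinned);
}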
- * - * \param dstDevice - Destination device pointer - * \param srcHost - Source host pointer - * \param ByteCount - Size of memory copy in bytes - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_sync - * \note_memcpy - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cudaMemcpy, - * ::cudaMemcpyToSymbol - */ -CUresult CUDAAPI cuMemcpyHtoD(CUdeviceptr dstDevice, const void *srcHost, size_t ByteCount); - -/** - * \brief Copies memory from Device to Host - * - * Copies from device to host memory. \p dstHost and \p srcDevice specify the - * base pointers of the destination and source, respectively. \p ByteCount - * specifies the number of bytes to copy. - * - * \param dstHost - Destination host pointer - * \param srcDevice - Source device pointer - * \param ByteCount - Size of memory copy in bytes - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_sync - * \note_memcpy - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cudaMemcpy, - * ::cudaMemcpyFromSymbol - */ -CUresult CUDAAPI cuMemcpyDtoH(void *dstHost, CUdeviceptr srcDevice, size_t ByteCount); - -/** - * \brief Copies memory from Device to Device - * - * Copies from device memory to device memory. \p dstDevice and \p srcDevice - * are the base pointers of the destination and source, respectively. - * \p ByteCount specifies the number of bytes to copy. 
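A minimal sketch of the synchronous cuMemcpyHtoD/cuMemcpyDtoH round trip documented above, assuming a current context.

#include <cuda.h>
#include <stdio.h>

void round_trip(void) {
    float host[256], back[256];
    CUdeviceptr dev;

    for (int i = 0; i < 256; ++i) host[i] = (float)i;

    cuMemAlloc(&dev, sizeof(host));
    cuMemcpyHtoD(dev, host, sizeof(host));   /* host   -> device */
    /* ... launch kernels that operate on `dev` ... */
    cuMemcpyDtoH(back, dev, sizeof(back));   /* device -> host   */
    cuMemFree(dev);

    printf("back[42] = %f\n", back[42]);
}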
- * - * \param dstDevice - Destination device pointer - * \param srcDevice - Source device pointer - * \param ByteCount - Size of memory copy in bytes - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_sync - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cudaMemcpy, - * ::cudaMemcpyToSymbol, - * ::cudaMemcpyFromSymbol - */ -CUresult CUDAAPI cuMemcpyDtoD(CUdeviceptr dstDevice, CUdeviceptr srcDevice, size_t ByteCount); - -/** - * \brief Copies memory from Device to Array - * - * Copies from device memory to a 1D CUDA array. \p dstArray and \p dstOffset - * specify the CUDA array handle and starting index of the destination data. - * \p srcDevice specifies the base pointer of the source. \p ByteCount - * specifies the number of bytes to copy. - * - * \param dstArray - Destination array - * \param dstOffset - Offset in bytes of destination array - * \param srcDevice - Source device pointer - * \param ByteCount - Size of memory copy in bytes - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_sync - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cudaMemcpyToArray - */ -CUresult CUDAAPI cuMemcpyDtoA(CUarray dstArray, size_t dstOffset, CUdeviceptr srcDevice, size_t ByteCount); - -/** - * \brief Copies memory from Array to Device - * - * Copies from one 1D CUDA array to device memory. \p dstDevice specifies the - * base pointer of the destination and must be naturally aligned with the CUDA - * array elements. \p srcArray and \p srcOffset specify the CUDA array handle - * and the offset in bytes into the array where the copy is to begin. - * \p ByteCount specifies the number of bytes to copy and must be evenly - * divisible by the array element size. 
- * - * \param dstDevice - Destination device pointer - * \param srcArray - Source array - * \param srcOffset - Offset in bytes of source array - * \param ByteCount - Size of memory copy in bytes - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_sync - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cudaMemcpyFromArray - */ -CUresult CUDAAPI cuMemcpyAtoD(CUdeviceptr dstDevice, CUarray srcArray, size_t srcOffset, size_t ByteCount); - -/** - * \brief Copies memory from Host to Array - * - * Copies from host memory to a 1D CUDA array. \p dstArray and \p dstOffset - * specify the CUDA array handle and starting offset in bytes of the destination - * data. \p pSrc specifies the base address of the source. \p ByteCount specifies - * the number of bytes to copy. - * - * \param dstArray - Destination array - * \param dstOffset - Offset in bytes of destination array - * \param srcHost - Source host pointer - * \param ByteCount - Size of memory copy in bytes - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_sync - * \note_memcpy - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cudaMemcpyToArray - */ -CUresult CUDAAPI cuMemcpyHtoA(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount); - -/** - * \brief Copies memory from Array to Host - * - * Copies from one 1D CUDA array to host memory. \p dstHost specifies the base - * pointer of the destination. \p srcArray and \p srcOffset specify the CUDA - * array handle and starting offset in bytes of the source data. - * \p ByteCount specifies the number of bytes to copy. 
- * - * \param dstHost - Destination device pointer - * \param srcArray - Source array - * \param srcOffset - Offset in bytes of source array - * \param ByteCount - Size of memory copy in bytes - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_sync - * \note_memcpy - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cudaMemcpyFromArray - */ -CUresult CUDAAPI cuMemcpyAtoH(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount); - -/** - * \brief Copies memory from Array to Array - * - * Copies from one 1D CUDA array to another. \p dstArray and \p srcArray - * specify the handles of the destination and source CUDA arrays for the copy, - * respectively. \p dstOffset and \p srcOffset specify the destination and - * source offsets in bytes into the CUDA arrays. \p ByteCount is the number of - * bytes to be copied. The size of the elements in the CUDA arrays need not be - * the same format, but the elements must be the same size; and count must be - * evenly divisible by that size. - * - * \param dstArray - Destination array - * \param dstOffset - Offset in bytes of destination array - * \param srcArray - Source array - * \param srcOffset - Offset in bytes of source array - * \param ByteCount - Size of memory copy in bytes - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_sync - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cudaMemcpyArrayToArray - */ -CUresult CUDAAPI cuMemcpyAtoA(CUarray dstArray, size_t dstOffset, CUarray srcArray, size_t srcOffset, size_t ByteCount); - -/** - * \brief Copies memory for 2D arrays - * - * Perform a 2D memory copy according to the parameters specified in \p pCopy. 
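A minimal sketch of moving data through a 1D CUDA array with the cuMemcpyHtoA/cuMemcpyAtoH calls documented above, assuming a single-channel float format; cuArrayCreate and CUDA_ARRAY_DESCRIPTOR are declared elsewhere in this header.

#include <cuda.h>
#include <string.h>

void array_round_trip(void) {
    float in[1024], out[1024];
    CUarray arr;
    CUDA_ARRAY_DESCRIPTOR desc;

    memset(&desc, 0, sizeof(desc));
    desc.Width       = 1024;               /* elements, not bytes */
    desc.Height      = 0;                  /* 0 (or 1) makes it a 1D array */
    desc.Format      = CU_AD_FORMAT_FLOAT;
    desc.NumChannels = 1;
    cuArrayCreate(&arr, &desc);

    /* ... fill `in` ... */
    /* Offsets and byte counts must be multiples of the 4-byte element size. */
    cuMemcpyHtoA(arr, 0, in, sizeof(in));
    cuMemcpyAtoH(out, arr, 0, sizeof(out));

    cuArrayDestroy(arr);
    (void)out;
}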
- * The ::CUDA_MEMCPY2D structure is defined as: - * - * \code - typedef struct CUDA_MEMCPY2D_st { - unsigned int srcXInBytes, srcY; - CUmemorytype srcMemoryType; - const void *srcHost; - CUdeviceptr srcDevice; - CUarray srcArray; - unsigned int srcPitch; - - unsigned int dstXInBytes, dstY; - CUmemorytype dstMemoryType; - void *dstHost; - CUdeviceptr dstDevice; - CUarray dstArray; - unsigned int dstPitch; - - unsigned int WidthInBytes; - unsigned int Height; - } CUDA_MEMCPY2D; - * \endcode - * where: - * - ::srcMemoryType and ::dstMemoryType specify the type of memory of the - * source and destination, respectively; ::CUmemorytype_enum is defined as: - * - * \code - typedef enum CUmemorytype_enum { - CU_MEMORYTYPE_HOST = 0x01, - CU_MEMORYTYPE_DEVICE = 0x02, - CU_MEMORYTYPE_ARRAY = 0x03, - CU_MEMORYTYPE_UNIFIED = 0x04 - } CUmemorytype; - * \endcode - * - * \par - * If ::srcMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::srcDevice and ::srcPitch - * specify the (unified virtual address space) base address of the source data - * and the bytes per row to apply. ::srcArray is ignored. - * This value may be used only if unified addressing is supported in the calling - * context. - * - * \par - * If ::srcMemoryType is ::CU_MEMORYTYPE_HOST, ::srcHost and ::srcPitch - * specify the (host) base address of the source data and the bytes per row to - * apply. ::srcArray is ignored. - * - * \par - * If ::srcMemoryType is ::CU_MEMORYTYPE_DEVICE, ::srcDevice and ::srcPitch - * specify the (device) base address of the source data and the bytes per row - * to apply. ::srcArray is ignored. - * - * \par - * If ::srcMemoryType is ::CU_MEMORYTYPE_ARRAY, ::srcArray specifies the - * handle of the source data. ::srcHost, ::srcDevice and ::srcPitch are - * ignored. - * - * \par - * If ::dstMemoryType is ::CU_MEMORYTYPE_HOST, ::dstHost and ::dstPitch - * specify the (host) base address of the destination data and the bytes per - * row to apply. ::dstArray is ignored. - * - * \par - * If ::dstMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::dstDevice and ::dstPitch - * specify the (unified virtual address space) base address of the source data - * and the bytes per row to apply. ::dstArray is ignored. - * This value may be used only if unified addressing is supported in the calling - * context. - * - * \par - * If ::dstMemoryType is ::CU_MEMORYTYPE_DEVICE, ::dstDevice and ::dstPitch - * specify the (device) base address of the destination data and the bytes per - * row to apply. ::dstArray is ignored. - * - * \par - * If ::dstMemoryType is ::CU_MEMORYTYPE_ARRAY, ::dstArray specifies the - * handle of the destination data. ::dstHost, ::dstDevice and ::dstPitch are - * ignored. - * - * - ::srcXInBytes and ::srcY specify the base address of the source data for - * the copy. - * - * \par - * For host pointers, the starting address is - * \code - void* Start = (void*)((char*)srcHost+srcY*srcPitch + srcXInBytes); - * \endcode - * - * \par - * For device pointers, the starting address is - * \code - CUdeviceptr Start = srcDevice+srcY*srcPitch+srcXInBytes; - * \endcode - * - * \par - * For CUDA arrays, ::srcXInBytes must be evenly divisible by the array - * element size. - * - * - ::dstXInBytes and ::dstY specify the base address of the destination data - * for the copy. 
- * - * \par - * For host pointers, the base address is - * \code - void* dstStart = (void*)((char*)dstHost+dstY*dstPitch + dstXInBytes); - * \endcode - * - * \par - * For device pointers, the starting address is - * \code - CUdeviceptr dstStart = dstDevice+dstY*dstPitch+dstXInBytes; - * \endcode - * - * \par - * For CUDA arrays, ::dstXInBytes must be evenly divisible by the array - * element size. - * - * - ::WidthInBytes and ::Height specify the width (in bytes) and height of - * the 2D copy being performed. - * - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes + - * ::srcXInBytes, and ::dstPitch must be greater than or equal to - * ::WidthInBytes + dstXInBytes. - * - * \par - * ::cuMemcpy2D() returns an error if any pitch is greater than the maximum - * allowed (::CU_DEVICE_ATTRIBUTE_MAX_PITCH). ::cuMemAllocPitch() passes back - * pitches that always work with ::cuMemcpy2D(). On intra-device memory copies - * (device to device, CUDA array to device, CUDA array to CUDA array), - * ::cuMemcpy2D() may fail for pitches not computed by ::cuMemAllocPitch(). - * ::cuMemcpy2DUnaligned() does not have this restriction, but may run - * significantly slower in the cases where ::cuMemcpy2D() would have returned - * an error code. - * - * \param pCopy - Parameters for the memory copy - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_sync - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cudaMemcpy2D, - * ::cudaMemcpy2DToArray, - * ::cudaMemcpy2DFromArray - */ -CUresult CUDAAPI cuMemcpy2D(const CUDA_MEMCPY2D *pCopy); - -/** - * \brief Copies memory for 2D arrays - * - * Perform a 2D memory copy according to the parameters specified in \p pCopy. 
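A minimal sketch of filling the CUDA_MEMCPY2D descriptor described above for a host-to-pitched-device copy, assuming the destination comes from cuMemAllocPitch so the pitch is always acceptable to cuMemcpy2D.

#include <cuda.h>
#include <string.h>

void copy_2d(const float *host, size_t width, size_t height) {
    CUdeviceptr dev;
    size_t pitch;                           /* bytes per device row */
    CUDA_MEMCPY2D cp;

    cuMemAllocPitch(&dev, &pitch, width * sizeof(float), height, sizeof(float));

    memset(&cp, 0, sizeof(cp));             /* zero the offsets and unused fields */
    cp.srcMemoryType = CU_MEMORYTYPE_HOST;
    cp.srcHost       = host;
    cp.srcPitch      = width * sizeof(float);   /* tightly packed host rows */
    cp.dstMemoryType = CU_MEMORYTYPE_DEVICE;
    cp.dstDevice     = dev;
    cp.dstPitch      = pitch;
    cp.WidthInBytes  = width * sizeof(float);
    cp.Height        = height;

    cuMemcpy2D(&cp);
    cuMemFree(dev);
}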
- * The ::CUDA_MEMCPY2D structure is defined as: - * - * \code - typedef struct CUDA_MEMCPY2D_st { - unsigned int srcXInBytes, srcY; - CUmemorytype srcMemoryType; - const void *srcHost; - CUdeviceptr srcDevice; - CUarray srcArray; - unsigned int srcPitch; - unsigned int dstXInBytes, dstY; - CUmemorytype dstMemoryType; - void *dstHost; - CUdeviceptr dstDevice; - CUarray dstArray; - unsigned int dstPitch; - unsigned int WidthInBytes; - unsigned int Height; - } CUDA_MEMCPY2D; - * \endcode - * where: - * - ::srcMemoryType and ::dstMemoryType specify the type of memory of the - * source and destination, respectively; ::CUmemorytype_enum is defined as: - * - * \code - typedef enum CUmemorytype_enum { - CU_MEMORYTYPE_HOST = 0x01, - CU_MEMORYTYPE_DEVICE = 0x02, - CU_MEMORYTYPE_ARRAY = 0x03, - CU_MEMORYTYPE_UNIFIED = 0x04 - } CUmemorytype; - * \endcode - * - * \par - * If ::srcMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::srcDevice and ::srcPitch - * specify the (unified virtual address space) base address of the source data - * and the bytes per row to apply. ::srcArray is ignored. - * This value may be used only if unified addressing is supported in the calling - * context. - * - * \par - * If ::srcMemoryType is ::CU_MEMORYTYPE_HOST, ::srcHost and ::srcPitch - * specify the (host) base address of the source data and the bytes per row to - * apply. ::srcArray is ignored. - * - * \par - * If ::srcMemoryType is ::CU_MEMORYTYPE_DEVICE, ::srcDevice and ::srcPitch - * specify the (device) base address of the source data and the bytes per row - * to apply. ::srcArray is ignored. - * - * \par - * If ::srcMemoryType is ::CU_MEMORYTYPE_ARRAY, ::srcArray specifies the - * handle of the source data. ::srcHost, ::srcDevice and ::srcPitch are - * ignored. - * - * \par - * If ::dstMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::dstDevice and ::dstPitch - * specify the (unified virtual address space) base address of the source data - * and the bytes per row to apply. ::dstArray is ignored. - * This value may be used only if unified addressing is supported in the calling - * context. - * - * \par - * If ::dstMemoryType is ::CU_MEMORYTYPE_HOST, ::dstHost and ::dstPitch - * specify the (host) base address of the destination data and the bytes per - * row to apply. ::dstArray is ignored. - * - * \par - * If ::dstMemoryType is ::CU_MEMORYTYPE_DEVICE, ::dstDevice and ::dstPitch - * specify the (device) base address of the destination data and the bytes per - * row to apply. ::dstArray is ignored. - * - * \par - * If ::dstMemoryType is ::CU_MEMORYTYPE_ARRAY, ::dstArray specifies the - * handle of the destination data. ::dstHost, ::dstDevice and ::dstPitch are - * ignored. - * - * - ::srcXInBytes and ::srcY specify the base address of the source data for - * the copy. - * - * \par - * For host pointers, the starting address is - * \code - void* Start = (void*)((char*)srcHost+srcY*srcPitch + srcXInBytes); - * \endcode - * - * \par - * For device pointers, the starting address is - * \code - CUdeviceptr Start = srcDevice+srcY*srcPitch+srcXInBytes; - * \endcode - * - * \par - * For CUDA arrays, ::srcXInBytes must be evenly divisible by the array - * element size. - * - * - ::dstXInBytes and ::dstY specify the base address of the destination data - * for the copy. 
- * - * \par - * For host pointers, the base address is - * \code - void* dstStart = (void*)((char*)dstHost+dstY*dstPitch + dstXInBytes); - * \endcode - * - * \par - * For device pointers, the starting address is - * \code - CUdeviceptr dstStart = dstDevice+dstY*dstPitch+dstXInBytes; - * \endcode - * - * \par - * For CUDA arrays, ::dstXInBytes must be evenly divisible by the array - * element size. - * - * - ::WidthInBytes and ::Height specify the width (in bytes) and height of - * the 2D copy being performed. - * - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes + - * ::srcXInBytes, and ::dstPitch must be greater than or equal to - * ::WidthInBytes + dstXInBytes. - * - * \par - * ::cuMemcpy2D() returns an error if any pitch is greater than the maximum - * allowed (::CU_DEVICE_ATTRIBUTE_MAX_PITCH). ::cuMemAllocPitch() passes back - * pitches that always work with ::cuMemcpy2D(). On intra-device memory copies - * (device to device, CUDA array to device, CUDA array to CUDA array), - * ::cuMemcpy2D() may fail for pitches not computed by ::cuMemAllocPitch(). - * ::cuMemcpy2DUnaligned() does not have this restriction, but may run - * significantly slower in the cases where ::cuMemcpy2D() would have returned - * an error code. - * - * \param pCopy - Parameters for the memory copy - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_sync - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cudaMemcpy2D, - * ::cudaMemcpy2DToArray, - * ::cudaMemcpy2DFromArray - */ -CUresult CUDAAPI cuMemcpy2DUnaligned(const CUDA_MEMCPY2D *pCopy); - -/** - * \brief Copies memory for 3D arrays - * - * Perform a 3D memory copy according to the parameters specified in - * \p pCopy. 
The ::CUDA_MEMCPY3D structure is defined as: - * - * \code - typedef struct CUDA_MEMCPY3D_st { - - unsigned int srcXInBytes, srcY, srcZ; - unsigned int srcLOD; - CUmemorytype srcMemoryType; - const void *srcHost; - CUdeviceptr srcDevice; - CUarray srcArray; - unsigned int srcPitch; // ignored when src is array - unsigned int srcHeight; // ignored when src is array; may be 0 if Depth==1 - - unsigned int dstXInBytes, dstY, dstZ; - unsigned int dstLOD; - CUmemorytype dstMemoryType; - void *dstHost; - CUdeviceptr dstDevice; - CUarray dstArray; - unsigned int dstPitch; // ignored when dst is array - unsigned int dstHeight; // ignored when dst is array; may be 0 if Depth==1 - - unsigned int WidthInBytes; - unsigned int Height; - unsigned int Depth; - } CUDA_MEMCPY3D; - * \endcode - * where: - * - ::srcMemoryType and ::dstMemoryType specify the type of memory of the - * source and destination, respectively; ::CUmemorytype_enum is defined as: - * - * \code - typedef enum CUmemorytype_enum { - CU_MEMORYTYPE_HOST = 0x01, - CU_MEMORYTYPE_DEVICE = 0x02, - CU_MEMORYTYPE_ARRAY = 0x03, - CU_MEMORYTYPE_UNIFIED = 0x04 - } CUmemorytype; - * \endcode - * - * \par - * If ::srcMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::srcDevice and ::srcPitch - * specify the (unified virtual address space) base address of the source data - * and the bytes per row to apply. ::srcArray is ignored. - * This value may be used only if unified addressing is supported in the calling - * context. - * - * \par - * If ::srcMemoryType is ::CU_MEMORYTYPE_HOST, ::srcHost, ::srcPitch and - * ::srcHeight specify the (host) base address of the source data, the bytes - * per row, and the height of each 2D slice of the 3D array. ::srcArray is - * ignored. - * - * \par - * If ::srcMemoryType is ::CU_MEMORYTYPE_DEVICE, ::srcDevice, ::srcPitch and - * ::srcHeight specify the (device) base address of the source data, the bytes - * per row, and the height of each 2D slice of the 3D array. ::srcArray is - * ignored. - * - * \par - * If ::srcMemoryType is ::CU_MEMORYTYPE_ARRAY, ::srcArray specifies the - * handle of the source data. ::srcHost, ::srcDevice, ::srcPitch and - * ::srcHeight are ignored. - * - * \par - * If ::dstMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::dstDevice and ::dstPitch - * specify the (unified virtual address space) base address of the source data - * and the bytes per row to apply. ::dstArray is ignored. - * This value may be used only if unified addressing is supported in the calling - * context. - * - * \par - * If ::dstMemoryType is ::CU_MEMORYTYPE_HOST, ::dstHost and ::dstPitch - * specify the (host) base address of the destination data, the bytes per row, - * and the height of each 2D slice of the 3D array. ::dstArray is ignored. - * - * \par - * If ::dstMemoryType is ::CU_MEMORYTYPE_DEVICE, ::dstDevice and ::dstPitch - * specify the (device) base address of the destination data, the bytes per - * row, and the height of each 2D slice of the 3D array. ::dstArray is ignored. - * - * \par - * If ::dstMemoryType is ::CU_MEMORYTYPE_ARRAY, ::dstArray specifies the - * handle of the destination data. ::dstHost, ::dstDevice, ::dstPitch and - * ::dstHeight are ignored. - * - * - ::srcXInBytes, ::srcY and ::srcZ specify the base address of the source - * data for the copy. 
- * - * \par - * For host pointers, the starting address is - * \code - void* Start = (void*)((char*)srcHost+(srcZ*srcHeight+srcY)*srcPitch + srcXInBytes); - * \endcode - * - * \par - * For device pointers, the starting address is - * \code - CUdeviceptr Start = srcDevice+(srcZ*srcHeight+srcY)*srcPitch+srcXInBytes; - * \endcode - * - * \par - * For CUDA arrays, ::srcXInBytes must be evenly divisible by the array - * element size. - * - * - dstXInBytes, ::dstY and ::dstZ specify the base address of the - * destination data for the copy. - * - * \par - * For host pointers, the base address is - * \code - void* dstStart = (void*)((char*)dstHost+(dstZ*dstHeight+dstY)*dstPitch + dstXInBytes); - * \endcode - * - * \par - * For device pointers, the starting address is - * \code - CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes; - * \endcode - * - * \par - * For CUDA arrays, ::dstXInBytes must be evenly divisible by the array - * element size. - * - * - ::WidthInBytes, ::Height and ::Depth specify the width (in bytes), height - * and depth of the 3D copy being performed. - * - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes + - * ::srcXInBytes, and ::dstPitch must be greater than or equal to - * ::WidthInBytes + dstXInBytes. - * - If specified, ::srcHeight must be greater than or equal to ::Height + - * ::srcY, and ::dstHeight must be greater than or equal to ::Height + ::dstY. - * - * \par - * ::cuMemcpy3D() returns an error if any pitch is greater than the maximum - * allowed (::CU_DEVICE_ATTRIBUTE_MAX_PITCH). - * - * The ::srcLOD and ::dstLOD members of the ::CUDA_MEMCPY3D structure must be - * set to 0. - * - * \param pCopy - Parameters for the memory copy - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_sync - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cudaMemcpy3D - */ -CUresult CUDAAPI cuMemcpy3D(const CUDA_MEMCPY3D *pCopy); - -/** - * \brief Copies memory between contexts - * - * Perform a 3D memory copy according to the parameters specified in - * \p pCopy. See the definition of the ::CUDA_MEMCPY3D_PEER structure - * for documentation of its parameters. - * - * \param pCopy - Parameters for the memory copy - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_sync - * - * \sa ::cuMemcpyDtoD, ::cuMemcpyPeer, ::cuMemcpyDtoDAsync, ::cuMemcpyPeerAsync, - * ::cuMemcpy3DPeerAsync, - * ::cudaMemcpy3DPeer - */ -CUresult CUDAAPI cuMemcpy3DPeer(const CUDA_MEMCPY3D_PEER *pCopy); - -/** - * \brief Copies memory asynchronously - * - * Copies data between two pointers. 
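A minimal sketch of a CUDA_MEMCPY3D host-to-linear-device copy, assuming a tightly packed W x H x D float volume; zero-initializing the struct also satisfies the requirement that srcLOD and dstLOD be 0.

#include <cuda.h>
#include <string.h>

void copy_3d(const float *host, size_t w, size_t h, size_t d) {
    CUdeviceptr dev;
    CUDA_MEMCPY3D cp;

    cuMemAlloc(&dev, w * h * d * sizeof(float));

    memset(&cp, 0, sizeof(cp));             /* offsets and LOD members become 0 */
    cp.srcMemoryType = CU_MEMORYTYPE_HOST;
    cp.srcHost       = host;
    cp.srcPitch      = w * sizeof(float);   /* bytes per row  */
    cp.srcHeight     = h;                   /* rows per slice */
    cp.dstMemoryType = CU_MEMORYTYPE_DEVICE;
    cp.dstDevice     = dev;
    cp.dstPitch      = w * sizeof(float);
    cp.dstHeight     = h;
    cp.WidthInBytes  = w * sizeof(float);
    cp.Height        = h;
    cp.Depth         = d;

    cuMemcpy3D(&cp);
    cuMemFree(dev);
}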
- * \p dst and \p src are base pointers of the destination and source, respectively. - * \p ByteCount specifies the number of bytes to copy. - * Note that this function infers the type of the transfer (host to host, host to - * device, device to device, or device to host) from the pointer values. This - * function is only allowed in contexts which support unified addressing. - * - * \param dst - Destination unified virtual address space pointer - * \param src - Source unified virtual address space pointer - * \param ByteCount - Size of memory copy in bytes - * \param hStream - Stream identifier - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE - * \notefnerr - * \note_async - * \note_null_stream - * \note_memcpy - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, - * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, - * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, - * ::cuMemsetD32, ::cuMemsetD32Async, - * ::cudaMemcpyAsync, - * ::cudaMemcpyToSymbolAsync, - * ::cudaMemcpyFromSymbolAsync - */ -CUresult CUDAAPI cuMemcpyAsync(CUdeviceptr dst, CUdeviceptr src, size_t ByteCount, CUstream hStream); - -/** - * \brief Copies device memory between two contexts asynchronously. - * - * Copies from device memory in one context to device memory in another - * context. \p dstDevice is the base device pointer of the destination memory - * and \p dstContext is the destination context. \p srcDevice is the base - * device pointer of the source memory and \p srcContext is the source pointer. - * \p ByteCount specifies the number of bytes to copy. - * - * \param dstDevice - Destination device pointer - * \param dstContext - Destination context - * \param srcDevice - Source device pointer - * \param srcContext - Source context - * \param ByteCount - Size of memory copy in bytes - * \param hStream - Stream identifier - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE - * \notefnerr - * \note_async - * \note_null_stream - * - * \sa ::cuMemcpyDtoD, ::cuMemcpyPeer, ::cuMemcpy3DPeer, ::cuMemcpyDtoDAsync, - * ::cuMemcpy3DPeerAsync, - * ::cudaMemcpyPeerAsync - */ -CUresult CUDAAPI cuMemcpyPeerAsync(CUdeviceptr dstDevice, CUcontext dstContext, CUdeviceptr srcDevice, CUcontext srcContext, size_t ByteCount, CUstream hStream); - -/** - * \brief Copies memory from Host to Device - * - * Copies from host memory to device memory. \p dstDevice and \p srcHost are - * the base addresses of the destination and source, respectively. \p ByteCount - * specifies the number of bytes to copy. 
- * - * \param dstDevice - Destination device pointer - * \param srcHost - Source host pointer - * \param ByteCount - Size of memory copy in bytes - * \param hStream - Stream identifier - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE - * \notefnerr - * \note_async - * \note_null_stream - * \note_memcpy - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, - * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, - * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, - * ::cuMemsetD32, ::cuMemsetD32Async, - * ::cudaMemcpyAsync, - * ::cudaMemcpyToSymbolAsync - */ -CUresult CUDAAPI cuMemcpyHtoDAsync(CUdeviceptr dstDevice, const void *srcHost, size_t ByteCount, CUstream hStream); - -/** - * \brief Copies memory from Device to Host - * - * Copies from device to host memory. \p dstHost and \p srcDevice specify the - * base pointers of the destination and source, respectively. \p ByteCount - * specifies the number of bytes to copy. - * - * \param dstHost - Destination host pointer - * \param srcDevice - Source device pointer - * \param ByteCount - Size of memory copy in bytes - * \param hStream - Stream identifier - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE - * \notefnerr - * \note_async - * \note_null_stream - * \note_memcpy - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, - * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, - * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, - * ::cuMemsetD32, ::cuMemsetD32Async, - * ::cudaMemcpyAsync, - * ::cudaMemcpyFromSymbolAsync - */ -CUresult CUDAAPI cuMemcpyDtoHAsync(void *dstHost, CUdeviceptr srcDevice, size_t ByteCount, CUstream hStream); - -/** - * \brief Copies memory from Device to Device - * - * Copies from device memory to device memory. \p dstDevice and \p srcDevice - * are the base pointers of the destination and source, respectively. - * \p ByteCount specifies the number of bytes to copy. 
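A minimal sketch of the asynchronous round trip documented above, assuming the host buffer is page-locked via cuMemAllocHost (the async copies only overlap with other work for pinned memory) and that the result is read only after cuStreamSynchronize.

#include <cuda.h>
#include <stddef.h>

void async_round_trip(size_t bytes) {
    void *pinned;
    CUdeviceptr dev;
    CUstream stream;

    cuMemAllocHost(&pinned, bytes);             /* page-locked host buffer */
    cuMemAlloc(&dev, bytes);
    cuStreamCreate(&stream, CU_STREAM_NON_BLOCKING);

    cuMemcpyHtoDAsync(dev, pinned, bytes, stream);
    /* ... enqueue kernels on `stream` that consume `dev` ... */
    cuMemcpyDtoHAsync(pinned, dev, bytes, stream);

    cuStreamSynchronize(stream);                /* `pinned` is valid after this */

    cuStreamDestroy(stream);
    cuMemFree(dev);
    cuMemFreeHost(pinned);
}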
- * - * \param dstDevice - Destination device pointer - * \param srcDevice - Source device pointer - * \param ByteCount - Size of memory copy in bytes - * \param hStream - Stream identifier - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE - * \notefnerr - * \note_async - * \note_null_stream - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, - * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, - * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, - * ::cuMemsetD32, ::cuMemsetD32Async, - * ::cudaMemcpyAsync, - * ::cudaMemcpyToSymbolAsync, - * ::cudaMemcpyFromSymbolAsync - */ -CUresult CUDAAPI cuMemcpyDtoDAsync(CUdeviceptr dstDevice, CUdeviceptr srcDevice, size_t ByteCount, CUstream hStream); - -/** - * \brief Copies memory from Host to Array - * - * Copies from host memory to a 1D CUDA array. \p dstArray and \p dstOffset - * specify the CUDA array handle and starting offset in bytes of the - * destination data. \p srcHost specifies the base address of the source. - * \p ByteCount specifies the number of bytes to copy. - * - * \param dstArray - Destination array - * \param dstOffset - Offset in bytes of destination array - * \param srcHost - Source host pointer - * \param ByteCount - Size of memory copy in bytes - * \param hStream - Stream identifier - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE - * \notefnerr - * \note_async - * \note_null_stream - * \note_memcpy - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, - * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, - * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, - * ::cuMemsetD32, ::cuMemsetD32Async, - * ::cudaMemcpyToArrayAsync - */ -CUresult CUDAAPI cuMemcpyHtoAAsync(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount, CUstream hStream); - -/** - * \brief Copies memory from Array to Host - * - * Copies from one 1D CUDA array to host memory. \p dstHost specifies the base - * pointer of the destination. 
\p srcArray and \p srcOffset specify the CUDA - * array handle and starting offset in bytes of the source data. - * \p ByteCount specifies the number of bytes to copy. - * - * \param dstHost - Destination pointer - * \param srcArray - Source array - * \param srcOffset - Offset in bytes of source array - * \param ByteCount - Size of memory copy in bytes - * \param hStream - Stream identifier - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE - * \notefnerr - * \note_async - * \note_null_stream - * \note_memcpy - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, - * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, - * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, - * ::cuMemsetD32, ::cuMemsetD32Async, - * ::cudaMemcpyFromArrayAsync - */ -CUresult CUDAAPI cuMemcpyAtoHAsync(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount, CUstream hStream); - -/** - * \brief Copies memory for 2D arrays - * - * Perform a 2D memory copy according to the parameters specified in \p pCopy. - * The ::CUDA_MEMCPY2D structure is defined as: - * - * \code - typedef struct CUDA_MEMCPY2D_st { - unsigned int srcXInBytes, srcY; - CUmemorytype srcMemoryType; - const void *srcHost; - CUdeviceptr srcDevice; - CUarray srcArray; - unsigned int srcPitch; - unsigned int dstXInBytes, dstY; - CUmemorytype dstMemoryType; - void *dstHost; - CUdeviceptr dstDevice; - CUarray dstArray; - unsigned int dstPitch; - unsigned int WidthInBytes; - unsigned int Height; - } CUDA_MEMCPY2D; - * \endcode - * where: - * - ::srcMemoryType and ::dstMemoryType specify the type of memory of the - * source and destination, respectively; ::CUmemorytype_enum is defined as: - * - * \code - typedef enum CUmemorytype_enum { - CU_MEMORYTYPE_HOST = 0x01, - CU_MEMORYTYPE_DEVICE = 0x02, - CU_MEMORYTYPE_ARRAY = 0x03, - CU_MEMORYTYPE_UNIFIED = 0x04 - } CUmemorytype; - * \endcode - * - * \par - * If ::srcMemoryType is ::CU_MEMORYTYPE_HOST, ::srcHost and ::srcPitch - * specify the (host) base address of the source data and the bytes per row to - * apply. ::srcArray is ignored. - * - * \par - * If ::srcMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::srcDevice and ::srcPitch - * specify the (unified virtual address space) base address of the source data - * and the bytes per row to apply. ::srcArray is ignored. - * This value may be used only if unified addressing is supported in the calling - * context. - * - * \par - * If ::srcMemoryType is ::CU_MEMORYTYPE_DEVICE, ::srcDevice and ::srcPitch - * specify the (device) base address of the source data and the bytes per row - * to apply. ::srcArray is ignored. - * - * \par - * If ::srcMemoryType is ::CU_MEMORYTYPE_ARRAY, ::srcArray specifies the - * handle of the source data. 
::srcHost, ::srcDevice and ::srcPitch are - * ignored. - * - * \par - * If ::dstMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::dstDevice and ::dstPitch - * specify the (unified virtual address space) base address of the source data - * and the bytes per row to apply. ::dstArray is ignored. - * This value may be used only if unified addressing is supported in the calling - * context. - * - * \par - * If ::dstMemoryType is ::CU_MEMORYTYPE_HOST, ::dstHost and ::dstPitch - * specify the (host) base address of the destination data and the bytes per - * row to apply. ::dstArray is ignored. - * - * \par - * If ::dstMemoryType is ::CU_MEMORYTYPE_DEVICE, ::dstDevice and ::dstPitch - * specify the (device) base address of the destination data and the bytes per - * row to apply. ::dstArray is ignored. - * - * \par - * If ::dstMemoryType is ::CU_MEMORYTYPE_ARRAY, ::dstArray specifies the - * handle of the destination data. ::dstHost, ::dstDevice and ::dstPitch are - * ignored. - * - * - ::srcXInBytes and ::srcY specify the base address of the source data for - * the copy. - * - * \par - * For host pointers, the starting address is - * \code - void* Start = (void*)((char*)srcHost+srcY*srcPitch + srcXInBytes); - * \endcode - * - * \par - * For device pointers, the starting address is - * \code - CUdeviceptr Start = srcDevice+srcY*srcPitch+srcXInBytes; - * \endcode - * - * \par - * For CUDA arrays, ::srcXInBytes must be evenly divisible by the array - * element size. - * - * - ::dstXInBytes and ::dstY specify the base address of the destination data - * for the copy. - * - * \par - * For host pointers, the base address is - * \code - void* dstStart = (void*)((char*)dstHost+dstY*dstPitch + dstXInBytes); - * \endcode - * - * \par - * For device pointers, the starting address is - * \code - CUdeviceptr dstStart = dstDevice+dstY*dstPitch+dstXInBytes; - * \endcode - * - * \par - * For CUDA arrays, ::dstXInBytes must be evenly divisible by the array - * element size. - * - * - ::WidthInBytes and ::Height specify the width (in bytes) and height of - * the 2D copy being performed. - * - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes + - * ::srcXInBytes, and ::dstPitch must be greater than or equal to - * ::WidthInBytes + dstXInBytes. - * - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes + - * ::srcXInBytes, and ::dstPitch must be greater than or equal to - * ::WidthInBytes + dstXInBytes. - * - If specified, ::srcHeight must be greater than or equal to ::Height + - * ::srcY, and ::dstHeight must be greater than or equal to ::Height + ::dstY. - * - * \par - * ::cuMemcpy2DAsync() returns an error if any pitch is greater than the maximum - * allowed (::CU_DEVICE_ATTRIBUTE_MAX_PITCH). ::cuMemAllocPitch() passes back - * pitches that always work with ::cuMemcpy2D(). On intra-device memory copies - * (device to device, CUDA array to device, CUDA array to CUDA array), - * ::cuMemcpy2DAsync() may fail for pitches not computed by ::cuMemAllocPitch(). 
- * - * \param pCopy - Parameters for the memory copy - * \param hStream - Stream identifier - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE - * \notefnerr - * \note_async - * \note_null_stream - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, - * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, - * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, - * ::cuMemsetD32, ::cuMemsetD32Async, - * ::cudaMemcpy2DAsync, - * ::cudaMemcpy2DToArrayAsync, - * ::cudaMemcpy2DFromArrayAsync - */ -CUresult CUDAAPI cuMemcpy2DAsync(const CUDA_MEMCPY2D *pCopy, CUstream hStream); - -/** - * \brief Copies memory for 3D arrays - * - * Perform a 3D memory copy according to the parameters specified in - * \p pCopy. The ::CUDA_MEMCPY3D structure is defined as: - * - * \code - typedef struct CUDA_MEMCPY3D_st { - - unsigned int srcXInBytes, srcY, srcZ; - unsigned int srcLOD; - CUmemorytype srcMemoryType; - const void *srcHost; - CUdeviceptr srcDevice; - CUarray srcArray; - unsigned int srcPitch; // ignored when src is array - unsigned int srcHeight; // ignored when src is array; may be 0 if Depth==1 - - unsigned int dstXInBytes, dstY, dstZ; - unsigned int dstLOD; - CUmemorytype dstMemoryType; - void *dstHost; - CUdeviceptr dstDevice; - CUarray dstArray; - unsigned int dstPitch; // ignored when dst is array - unsigned int dstHeight; // ignored when dst is array; may be 0 if Depth==1 - - unsigned int WidthInBytes; - unsigned int Height; - unsigned int Depth; - } CUDA_MEMCPY3D; - * \endcode - * where: - * - ::srcMemoryType and ::dstMemoryType specify the type of memory of the - * source and destination, respectively; ::CUmemorytype_enum is defined as: - * - * \code - typedef enum CUmemorytype_enum { - CU_MEMORYTYPE_HOST = 0x01, - CU_MEMORYTYPE_DEVICE = 0x02, - CU_MEMORYTYPE_ARRAY = 0x03, - CU_MEMORYTYPE_UNIFIED = 0x04 - } CUmemorytype; - * \endcode - * - * \par - * If ::srcMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::srcDevice and ::srcPitch - * specify the (unified virtual address space) base address of the source data - * and the bytes per row to apply. ::srcArray is ignored. - * This value may be used only if unified addressing is supported in the calling - * context. - * - * \par - * If ::srcMemoryType is ::CU_MEMORYTYPE_HOST, ::srcHost, ::srcPitch and - * ::srcHeight specify the (host) base address of the source data, the bytes - * per row, and the height of each 2D slice of the 3D array. ::srcArray is - * ignored. - * - * \par - * If ::srcMemoryType is ::CU_MEMORYTYPE_DEVICE, ::srcDevice, ::srcPitch and - * ::srcHeight specify the (device) base address of the source data, the bytes - * per row, and the height of each 2D slice of the 3D array. ::srcArray is - * ignored. 
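A minimal sketch of cuMemcpy2DAsync, assuming two pitched device buffers (ideally allocated with cuMemAllocPitch) and an existing stream; the descriptor is the same CUDA_MEMCPY2D used by the synchronous call.

#include <cuda.h>
#include <string.h>

void copy_2d_async(CUdeviceptr src, size_t src_pitch,
                   CUdeviceptr dst, size_t dst_pitch,
                   size_t width_bytes, size_t height, CUstream stream) {
    CUDA_MEMCPY2D cp;

    memset(&cp, 0, sizeof(cp));
    cp.srcMemoryType = CU_MEMORYTYPE_DEVICE;
    cp.srcDevice     = src;
    cp.srcPitch      = src_pitch;
    cp.dstMemoryType = CU_MEMORYTYPE_DEVICE;
    cp.dstDevice     = dst;
    cp.dstPitch      = dst_pitch;
    cp.WidthInBytes  = width_bytes;
    cp.Height        = height;

    cuMemcpy2DAsync(&cp, stream);   /* returns immediately; completes on `stream` */
}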
- * - * \par - * If ::srcMemoryType is ::CU_MEMORYTYPE_ARRAY, ::srcArray specifies the - * handle of the source data. ::srcHost, ::srcDevice, ::srcPitch and - * ::srcHeight are ignored. - * - * \par - * If ::dstMemoryType is ::CU_MEMORYTYPE_UNIFIED, ::dstDevice and ::dstPitch - * specify the (unified virtual address space) base address of the source data - * and the bytes per row to apply. ::dstArray is ignored. - * This value may be used only if unified addressing is supported in the calling - * context. - * - * \par - * If ::dstMemoryType is ::CU_MEMORYTYPE_HOST, ::dstHost and ::dstPitch - * specify the (host) base address of the destination data, the bytes per row, - * and the height of each 2D slice of the 3D array. ::dstArray is ignored. - * - * \par - * If ::dstMemoryType is ::CU_MEMORYTYPE_DEVICE, ::dstDevice and ::dstPitch - * specify the (device) base address of the destination data, the bytes per - * row, and the height of each 2D slice of the 3D array. ::dstArray is ignored. - * - * \par - * If ::dstMemoryType is ::CU_MEMORYTYPE_ARRAY, ::dstArray specifies the - * handle of the destination data. ::dstHost, ::dstDevice, ::dstPitch and - * ::dstHeight are ignored. - * - * - ::srcXInBytes, ::srcY and ::srcZ specify the base address of the source - * data for the copy. - * - * \par - * For host pointers, the starting address is - * \code - void* Start = (void*)((char*)srcHost+(srcZ*srcHeight+srcY)*srcPitch + srcXInBytes); - * \endcode - * - * \par - * For device pointers, the starting address is - * \code - CUdeviceptr Start = srcDevice+(srcZ*srcHeight+srcY)*srcPitch+srcXInBytes; - * \endcode - * - * \par - * For CUDA arrays, ::srcXInBytes must be evenly divisible by the array - * element size. - * - * - dstXInBytes, ::dstY and ::dstZ specify the base address of the - * destination data for the copy. - * - * \par - * For host pointers, the base address is - * \code - void* dstStart = (void*)((char*)dstHost+(dstZ*dstHeight+dstY)*dstPitch + dstXInBytes); - * \endcode - * - * \par - * For device pointers, the starting address is - * \code - CUdeviceptr dstStart = dstDevice+(dstZ*dstHeight+dstY)*dstPitch+dstXInBytes; - * \endcode - * - * \par - * For CUDA arrays, ::dstXInBytes must be evenly divisible by the array - * element size. - * - * - ::WidthInBytes, ::Height and ::Depth specify the width (in bytes), height - * and depth of the 3D copy being performed. - * - If specified, ::srcPitch must be greater than or equal to ::WidthInBytes + - * ::srcXInBytes, and ::dstPitch must be greater than or equal to - * ::WidthInBytes + dstXInBytes. - * - If specified, ::srcHeight must be greater than or equal to ::Height + - * ::srcY, and ::dstHeight must be greater than or equal to ::Height + ::dstY. - * - * \par - * ::cuMemcpy3DAsync() returns an error if any pitch is greater than the maximum - * allowed (::CU_DEVICE_ATTRIBUTE_MAX_PITCH). - * - * The ::srcLOD and ::dstLOD members of the ::CUDA_MEMCPY3D structure must be - * set to 0. 
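- *
- * As an illustration (a minimal sketch; srcPtr, dstPtr, the pitches, heights,
- * extents and hStream are placeholders, and error checking is omitted), a
- * device-to-device 3D copy can be described as:
- * \code
-     CUDA_MEMCPY3D cp = {0};                    // zero-initialized, so srcLOD/dstLOD stay 0
-     cp.srcMemoryType = CU_MEMORYTYPE_DEVICE;
-     cp.srcDevice     = srcPtr;
-     cp.srcPitch      = srcPitch;               // bytes per row
-     cp.srcHeight     = srcHeight;              // rows per 2D slice
-     cp.dstMemoryType = CU_MEMORYTYPE_DEVICE;
-     cp.dstDevice     = dstPtr;
-     cp.dstPitch      = dstPitch;
-     cp.dstHeight     = dstHeight;
-     cp.WidthInBytes  = widthInBytes;
-     cp.Height        = height;
-     cp.Depth         = depth;
-     cuMemcpy3DAsync(&cp, hStream);             // enqueued on hStream, returns immediately
- * \endcode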
- * - * \param pCopy - Parameters for the memory copy - * \param hStream - Stream identifier - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE - * \notefnerr - * \note_async - * \note_null_stream - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, - * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, - * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, - * ::cuMemsetD32, ::cuMemsetD32Async, - * ::cudaMemcpy3DAsync - */ -CUresult CUDAAPI cuMemcpy3DAsync(const CUDA_MEMCPY3D *pCopy, CUstream hStream); - -/** - * \brief Copies memory between contexts asynchronously. - * - * Perform a 3D memory copy according to the parameters specified in - * \p pCopy. See the definition of the ::CUDA_MEMCPY3D_PEER structure - * for documentation of its parameters. - * - * \param pCopy - Parameters for the memory copy - * \param hStream - Stream identifier - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_async - * \note_null_stream - * - * \sa ::cuMemcpyDtoD, ::cuMemcpyPeer, ::cuMemcpyDtoDAsync, ::cuMemcpyPeerAsync, - * ::cuMemcpy3DPeerAsync, - * ::cudaMemcpy3DPeerAsync - */ -CUresult CUDAAPI cuMemcpy3DPeerAsync(const CUDA_MEMCPY3D_PEER *pCopy, CUstream hStream); - -/** - * \brief Initializes device memory - * - * Sets the memory range of \p N 8-bit values to the specified value - * \p uc. 
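- *
- * For example (a minimal sketch, error checking omitted), a freshly allocated
- * buffer can be cleared as follows:
- * \code
-     CUdeviceptr dptr;
-     cuMemAlloc(&dptr, 1024);                   // 1024 bytes of linear device memory
-     cuMemsetD8(dptr, 0x00, 1024);              // set every byte to zero
-     cuMemFree(dptr);
- * \endcode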
- * - * \param dstDevice - Destination device pointer - * \param uc - Value to set - * \param N - Number of elements - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_memset - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, - * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, - * ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, - * ::cuMemsetD32, ::cuMemsetD32Async, - * ::cudaMemset - */ -CUresult CUDAAPI cuMemsetD8(CUdeviceptr dstDevice, unsigned char uc, size_t N); - -/** - * \brief Initializes device memory - * - * Sets the memory range of \p N 16-bit values to the specified value - * \p us. The \p dstDevice pointer must be two byte aligned. - * - * \param dstDevice - Destination device pointer - * \param us - Value to set - * \param N - Number of elements - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_memset - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, - * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, - * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16Async, - * ::cuMemsetD32, ::cuMemsetD32Async, - * ::cudaMemset - */ -CUresult CUDAAPI cuMemsetD16(CUdeviceptr dstDevice, unsigned short us, size_t N); - -/** - * \brief Initializes device memory - * - * Sets the memory range of \p N 32-bit values to the specified value - * \p ui. The \p dstDevice pointer must be four byte aligned. 
- * - * \param dstDevice - Destination device pointer - * \param ui - Value to set - * \param N - Number of elements - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_memset - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, - * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, - * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, - * ::cuMemsetD32Async, - * ::cudaMemset - */ -CUresult CUDAAPI cuMemsetD32(CUdeviceptr dstDevice, unsigned int ui, size_t N); - -/** - * \brief Initializes device memory - * - * Sets the 2D memory range of \p Width 8-bit values to the specified value - * \p uc. \p Height specifies the number of rows to set, and \p dstPitch - * specifies the number of bytes between each row. This function performs - * fastest when the pitch is one that has been passed back by - * ::cuMemAllocPitch(). - * - * \param dstDevice - Destination device pointer - * \param dstPitch - Pitch of destination device pointer(Unused if \p Height is 1) - * \param uc - Value to set - * \param Width - Width of row - * \param Height - Number of rows - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_memset - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8Async, - * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, - * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, - * ::cuMemsetD32, ::cuMemsetD32Async, - * ::cudaMemset2D - */ -CUresult CUDAAPI cuMemsetD2D8(CUdeviceptr dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height); - -/** - * \brief Initializes device memory - * - * Sets the 2D memory range of \p Width 16-bit values to the specified value - * \p us. \p Height specifies the number of rows to set, and \p dstPitch - * specifies the number of bytes between each row. The \p dstDevice pointer - * and \p dstPitch offset must be two byte aligned. This function performs - * fastest when the pitch is one that has been passed back by - * ::cuMemAllocPitch(). 
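- *
- * For example (a minimal sketch, error checking omitted; width and height are
- * placeholders), a pitched allocation can be filled with 16-bit values:
- * \code
-     CUdeviceptr dptr;
-     size_t pitch;
-     cuMemAllocPitch(&dptr, &pitch, width * sizeof(unsigned short), height, 4);   // 4-byte access-size hint
-     cuMemsetD2D16(dptr, pitch, 0xFFFF, width, height);   // Width is counted in 16-bit elements
- * \endcode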
- * - * \param dstDevice - Destination device pointer - * \param dstPitch - Pitch of destination device pointer(Unused if \p Height is 1) - * \param us - Value to set - * \param Width - Width of row - * \param Height - Number of rows - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_memset - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, - * ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, - * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, - * ::cuMemsetD32, ::cuMemsetD32Async, - * ::cudaMemset2D - */ -CUresult CUDAAPI cuMemsetD2D16(CUdeviceptr dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height); - -/** - * \brief Initializes device memory - * - * Sets the 2D memory range of \p Width 32-bit values to the specified value - * \p ui. \p Height specifies the number of rows to set, and \p dstPitch - * specifies the number of bytes between each row. The \p dstDevice pointer - * and \p dstPitch offset must be four byte aligned. This function performs - * fastest when the pitch is one that has been passed back by - * ::cuMemAllocPitch(). - * - * \param dstDevice - Destination device pointer - * \param dstPitch - Pitch of destination device pointer(Unused if \p Height is 1) - * \param ui - Value to set - * \param Width - Width of row - * \param Height - Number of rows - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_memset - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, - * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32Async, - * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, - * ::cuMemsetD32, ::cuMemsetD32Async, - * ::cudaMemset2D - */ -CUresult CUDAAPI cuMemsetD2D32(CUdeviceptr dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height); - -/** - * \brief Sets device memory - * - * Sets the memory range of \p N 8-bit values to the specified value - * \p uc. 
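- *
- * For example (a minimal sketch; dptr and numBytes are placeholders and error
- * checking is omitted), the memset can be enqueued on a user stream:
- * \code
-     CUstream stream;
-     cuStreamCreate(&stream, CU_STREAM_NON_BLOCKING);
-     cuMemsetD8Async(dptr, 0x00, numBytes, stream);   // returns before the memset completes
-     cuStreamSynchronize(stream);                     // wait for the memset to finish
-     cuStreamDestroy(stream);
- * \endcode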
- * - * \param dstDevice - Destination device pointer - * \param uc - Value to set - * \param N - Number of elements - * \param hStream - Stream identifier - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_memset - * \note_null_stream - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, - * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, - * ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD16Async, - * ::cuMemsetD32, ::cuMemsetD32Async, - * ::cudaMemsetAsync - */ -CUresult CUDAAPI cuMemsetD8Async(CUdeviceptr dstDevice, unsigned char uc, size_t N, CUstream hStream); - -/** - * \brief Sets device memory - * - * Sets the memory range of \p N 16-bit values to the specified value - * \p us. The \p dstDevice pointer must be two byte aligned. - * - * \param dstDevice - Destination device pointer - * \param us - Value to set - * \param N - Number of elements - * \param hStream - Stream identifier - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_memset - * \note_null_stream - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, - * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, - * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, - * ::cuMemsetD32, ::cuMemsetD32Async, - * ::cudaMemsetAsync - */ -CUresult CUDAAPI cuMemsetD16Async(CUdeviceptr dstDevice, unsigned short us, size_t N, CUstream hStream); - -/** - * \brief Sets device memory - * - * Sets the memory range of \p N 32-bit values to the specified value - * \p ui. The \p dstDevice pointer must be four byte aligned. 
- * - * \param dstDevice - Destination device pointer - * \param ui - Value to set - * \param N - Number of elements - * \param hStream - Stream identifier - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_memset - * \note_null_stream - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, - * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, - * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, ::cuMemsetD32, - * ::cudaMemsetAsync - */ -CUresult CUDAAPI cuMemsetD32Async(CUdeviceptr dstDevice, unsigned int ui, size_t N, CUstream hStream); - -/** - * \brief Sets device memory - * - * Sets the 2D memory range of \p Width 8-bit values to the specified value - * \p uc. \p Height specifies the number of rows to set, and \p dstPitch - * specifies the number of bytes between each row. This function performs - * fastest when the pitch is one that has been passed back by - * ::cuMemAllocPitch(). - * - * \param dstDevice - Destination device pointer - * \param dstPitch - Pitch of destination device pointer(Unused if \p Height is 1) - * \param uc - Value to set - * \param Width - Width of row - * \param Height - Number of rows - * \param hStream - Stream identifier - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_memset - * \note_null_stream - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, - * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, ::cuMemsetD2D32Async, - * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, - * ::cuMemsetD32, ::cuMemsetD32Async, - * ::cudaMemset2DAsync - */ -CUresult CUDAAPI cuMemsetD2D8Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height, CUstream hStream); - -/** - * \brief Sets device memory - * - * Sets the 2D memory range of \p Width 16-bit values to the specified value - * \p us. \p Height specifies the number of rows to set, and \p dstPitch - * specifies the number of bytes between each row. The \p dstDevice pointer - * and \p dstPitch offset must be two byte aligned. 
This function performs - * fastest when the pitch is one that has been passed back by - * ::cuMemAllocPitch(). - * - * \param dstDevice - Destination device pointer - * \param dstPitch - Pitch of destination device pointer(Unused if \p Height is 1) - * \param us - Value to set - * \param Width - Width of row - * \param Height - Number of rows - * \param hStream - Stream identifier - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_memset - * \note_null_stream - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, - * ::cuMemsetD2D16, ::cuMemsetD2D32, ::cuMemsetD2D32Async, - * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, - * ::cuMemsetD32, ::cuMemsetD32Async, - * ::cudaMemset2DAsync - */ -CUresult CUDAAPI cuMemsetD2D16Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height, CUstream hStream); - -/** - * \brief Sets device memory - * - * Sets the 2D memory range of \p Width 32-bit values to the specified value - * \p ui. \p Height specifies the number of rows to set, and \p dstPitch - * specifies the number of bytes between each row. The \p dstDevice pointer - * and \p dstPitch offset must be four byte aligned. This function performs - * fastest when the pitch is one that has been passed back by - * ::cuMemAllocPitch(). 
- * - * \param dstDevice - Destination device pointer - * \param dstPitch - Pitch of destination device pointer(Unused if \p Height is 1) - * \param ui - Value to set - * \param Width - Width of row - * \param Height - Number of rows - * \param hStream - Stream identifier - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * \note_memset - * \note_null_stream - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D8Async, - * ::cuMemsetD2D16, ::cuMemsetD2D16Async, ::cuMemsetD2D32, - * ::cuMemsetD8, ::cuMemsetD8Async, ::cuMemsetD16, ::cuMemsetD16Async, - * ::cuMemsetD32, ::cuMemsetD32Async, - * ::cudaMemset2DAsync - */ -CUresult CUDAAPI cuMemsetD2D32Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height, CUstream hStream); - -/** - * \brief Creates a 1D or 2D CUDA array - * - * Creates a CUDA array according to the ::CUDA_ARRAY_DESCRIPTOR structure - * \p pAllocateArray and returns a handle to the new CUDA array in \p *pHandle. - * The ::CUDA_ARRAY_DESCRIPTOR is defined as: - * - * \code - typedef struct { - unsigned int Width; - unsigned int Height; - CUarray_format Format; - unsigned int NumChannels; - } CUDA_ARRAY_DESCRIPTOR; - * \endcode - * where: - * - * - \p Width, and \p Height are the width, and height of the CUDA array (in - * elements); the CUDA array is one-dimensional if height is 0, two-dimensional - * otherwise; - * - ::Format specifies the format of the elements; ::CUarray_format is - * defined as: - * \code - typedef enum CUarray_format_enum { - CU_AD_FORMAT_UNSIGNED_INT8 = 0x01, - CU_AD_FORMAT_UNSIGNED_INT16 = 0x02, - CU_AD_FORMAT_UNSIGNED_INT32 = 0x03, - CU_AD_FORMAT_SIGNED_INT8 = 0x08, - CU_AD_FORMAT_SIGNED_INT16 = 0x09, - CU_AD_FORMAT_SIGNED_INT32 = 0x0a, - CU_AD_FORMAT_HALF = 0x10, - CU_AD_FORMAT_FLOAT = 0x20 - } CUarray_format; - * \endcode - * - \p NumChannels specifies the number of packed components per CUDA array - * element; it may be 1, 2, or 4; - * - * Here are examples of CUDA array descriptions: - * - * Description for a CUDA array of 2048 floats: - * \code - CUDA_ARRAY_DESCRIPTOR desc; - desc.Format = CU_AD_FORMAT_FLOAT; - desc.NumChannels = 1; - desc.Width = 2048; - desc.Height = 1; - * \endcode - * - * Description for a 64 x 64 CUDA array of floats: - * \code - CUDA_ARRAY_DESCRIPTOR desc; - desc.Format = CU_AD_FORMAT_FLOAT; - desc.NumChannels = 1; - desc.Width = 64; - desc.Height = 64; - * \endcode - * - * Description for a \p width x \p height CUDA array of 64-bit, 4x16-bit - * float16's: - * \code - CUDA_ARRAY_DESCRIPTOR desc; - desc.FormatFlags = CU_AD_FORMAT_HALF; - desc.NumChannels = 4; - desc.Width = width; - desc.Height = height; - * \endcode - * - * Description for a \p width x \p height CUDA array of 16-bit elements, each - * of which is two 8-bit unsigned chars: - * \code - 
CUDA_ARRAY_DESCRIPTOR arrayDesc; - desc.FormatFlags = CU_AD_FORMAT_UNSIGNED_INT8; - desc.NumChannels = 2; - desc.Width = width; - desc.Height = height; - * \endcode - * - * \param pHandle - Returned array - * \param pAllocateArray - Array descriptor - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_OUT_OF_MEMORY, - * ::CUDA_ERROR_UNKNOWN - * \notefnerr - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cudaMallocArray - */ -CUresult CUDAAPI cuArrayCreate(CUarray *pHandle, const CUDA_ARRAY_DESCRIPTOR *pAllocateArray); - -/** - * \brief Get a 1D or 2D CUDA array descriptor - * - * Returns in \p *pArrayDescriptor a descriptor containing information on the - * format and dimensions of the CUDA array \p hArray. It is useful for - * subroutines that have been passed a CUDA array, but need to know the CUDA - * array parameters for validation or other purposes. - * - * \param pArrayDescriptor - Returned array descriptor - * \param hArray - Array to get descriptor of - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE - * \notefnerr - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cudaArrayGetInfo - */ -CUresult CUDAAPI cuArrayGetDescriptor(CUDA_ARRAY_DESCRIPTOR *pArrayDescriptor, CUarray hArray); - -/** - * \brief Returns the layout properties of a sparse CUDA array - * - * Returns the layout properties of a sparse CUDA array in \p sparseProperties - * If the CUDA array is not allocated with flag ::CUDA_ARRAY3D_SPARSE - * ::CUDA_ERROR_INVALID_VALUE will be returned. - * - * If the returned value in ::CUDA_ARRAY_SPARSE_PROPERTIES::flags contains ::CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL, - * then ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailSize represents the total size of the array. Otherwise, it will be zero. - * Also, the returned value in ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailFirstLevel is always zero. - * Note that the \p array must have been allocated using ::cuArrayCreate or ::cuArray3DCreate. 
For CUDA arrays obtained - * using ::cuMipmappedArrayGetLevel, ::CUDA_ERROR_INVALID_VALUE will be returned. Instead, ::cuMipmappedArrayGetSparseProperties - * must be used to obtain the sparse properties of the entire CUDA mipmapped array to which \p array belongs to. - * - * \return - * ::CUDA_SUCCESS - * ::CUDA_ERROR_INVALID_VALUE - * - * \param[out] sparseProperties - Pointer to ::CUDA_ARRAY_SPARSE_PROPERTIES - * \param[in] array - CUDA array to get the sparse properties of - * \sa ::cuMipmappedArrayGetSparseProperties, ::cuMemMapArrayAsync - */ -CUresult CUDAAPI cuArrayGetSparseProperties(CUDA_ARRAY_SPARSE_PROPERTIES *sparseProperties, CUarray array); - -/** - * \brief Returns the layout properties of a sparse CUDA mipmapped array - * - * Returns the sparse array layout properties in \p sparseProperties - * If the CUDA mipmapped array is not allocated with flag ::CUDA_ARRAY3D_SPARSE - * ::CUDA_ERROR_INVALID_VALUE will be returned. - * - * For non-layered CUDA mipmapped arrays, ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailSize returns the - * size of the mip tail region. The mip tail region includes all mip levels whose width, height or depth - * is less than that of the tile. - * For layered CUDA mipmapped arrays, if ::CUDA_ARRAY_SPARSE_PROPERTIES::flags contains ::CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL, - * then ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailSize specifies the size of the mip tail of all layers combined. - * Otherwise, ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailSize specifies mip tail size per layer. - * The returned value of ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailFirstLevel is valid only if ::CUDA_ARRAY_SPARSE_PROPERTIES::miptailSize is non-zero. - * - * \return - * ::CUDA_SUCCESS - * ::CUDA_ERROR_INVALID_VALUE - * - * \param[out] sparseProperties - Pointer to ::CUDA_ARRAY_SPARSE_PROPERTIES - * \param[in] mipmap - CUDA mipmapped array to get the sparse properties of - * \sa ::cuArrayGetSparseProperties, ::cuMemMapArrayAsync - */ -CUresult CUDAAPI cuMipmappedArrayGetSparseProperties(CUDA_ARRAY_SPARSE_PROPERTIES *sparseProperties, CUmipmappedArray mipmap); - -/** - * \brief Gets a CUDA array plane from a CUDA array - * - * Returns in \p pPlaneArray a CUDA array that represents a single format plane - * of the CUDA array \p hArray. - * - * If \p planeIdx is greater than the maximum number of planes in this array or if the array does - * not have a multi-planar format e.g: ::CU_AD_FORMAT_NV12, then ::CUDA_ERROR_INVALID_VALUE is returned. - * - * Note that if the \p hArray has format ::CU_AD_FORMAT_NV12, then passing in 0 for \p planeIdx returns - * a CUDA array of the same size as \p hArray but with one channel and ::CU_AD_FORMAT_UNSIGNED_INT8 as its format. - * If 1 is passed for \p planeIdx, then the returned CUDA array has half the height and width - * of \p hArray with two channels and ::CU_AD_FORMAT_UNSIGNED_INT8 as its format. - * - * \param pPlaneArray - Returned CUDA array referenced by the \p planeIdx - * \param hArray - Multiplanar CUDA array - * \param planeIdx - Plane index - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE - * \notefnerr - * - * \sa - * ::cuArrayCreate, - * ::cudaGetArrayPlane - */ -CUresult CUDAAPI cuArrayGetPlane(CUarray *pPlaneArray, CUarray hArray, unsigned int planeIdx); - -/** - * \brief Destroys a CUDA array - * - * Destroys the CUDA array \p hArray. 
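- *
- * For illustration (a minimal sketch, error checking omitted), creation and
- * destruction are typically paired:
- * \code
-     CUDA_ARRAY_DESCRIPTOR desc = {0};
-     desc.Format      = CU_AD_FORMAT_FLOAT;
-     desc.NumChannels = 1;
-     desc.Width       = 64;
-     desc.Height      = 64;
-
-     CUarray arr;
-     cuArrayCreate(&arr, &desc);
-     /* ... use the array, e.g. as a copy target ... */
-     cuArrayDestroy(arr);
- * \endcode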
- * - * \param hArray - Array to destroy - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_ARRAY_IS_MAPPED, - * ::CUDA_ERROR_CONTEXT_IS_DESTROYED - * \notefnerr - * - * \sa ::cuArray3DCreate, ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cudaFreeArray - */ -CUresult CUDAAPI cuArrayDestroy(CUarray hArray); - -/** - * \brief Creates a 3D CUDA array - * - * Creates a CUDA array according to the ::CUDA_ARRAY3D_DESCRIPTOR structure - * \p pAllocateArray and returns a handle to the new CUDA array in \p *pHandle. - * The ::CUDA_ARRAY3D_DESCRIPTOR is defined as: - * - * \code - typedef struct { - unsigned int Width; - unsigned int Height; - unsigned int Depth; - CUarray_format Format; - unsigned int NumChannels; - unsigned int Flags; - } CUDA_ARRAY3D_DESCRIPTOR; - * \endcode - * where: - * - * - \p Width, \p Height, and \p Depth are the width, height, and depth of the - * CUDA array (in elements); the following types of CUDA arrays can be allocated: - * - A 1D array is allocated if \p Height and \p Depth extents are both zero. - * - A 2D array is allocated if only \p Depth extent is zero. - * - A 3D array is allocated if all three extents are non-zero. - * - A 1D layered CUDA array is allocated if only \p Height is zero and the - * ::CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 1D array. The number - * of layers is determined by the depth extent. - * - A 2D layered CUDA array is allocated if all three extents are non-zero and - * the ::CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 2D array. The number - * of layers is determined by the depth extent. - * - A cubemap CUDA array is allocated if all three extents are non-zero and the - * ::CUDA_ARRAY3D_CUBEMAP flag is set. \p Width must be equal to \p Height, and - * \p Depth must be six. A cubemap is a special type of 2D layered CUDA array, - * where the six layers represent the six faces of a cube. The order of the six - * layers in memory is the same as that listed in ::CUarray_cubemap_face. - * - A cubemap layered CUDA array is allocated if all three extents are non-zero, - * and both, ::CUDA_ARRAY3D_CUBEMAP and ::CUDA_ARRAY3D_LAYERED flags are set. - * \p Width must be equal to \p Height, and \p Depth must be a multiple of six. - * A cubemap layered CUDA array is a special type of 2D layered CUDA array that - * consists of a collection of cubemaps. The first six layers represent the first - * cubemap, the next six layers form the second cubemap, and so on. 
- * - * - ::Format specifies the format of the elements; ::CUarray_format is - * defined as: - * \code - typedef enum CUarray_format_enum { - CU_AD_FORMAT_UNSIGNED_INT8 = 0x01, - CU_AD_FORMAT_UNSIGNED_INT16 = 0x02, - CU_AD_FORMAT_UNSIGNED_INT32 = 0x03, - CU_AD_FORMAT_SIGNED_INT8 = 0x08, - CU_AD_FORMAT_SIGNED_INT16 = 0x09, - CU_AD_FORMAT_SIGNED_INT32 = 0x0a, - CU_AD_FORMAT_HALF = 0x10, - CU_AD_FORMAT_FLOAT = 0x20 - } CUarray_format; - * \endcode - * - * - \p NumChannels specifies the number of packed components per CUDA array - * element; it may be 1, 2, or 4; - * - * - ::Flags may be set to - * - ::CUDA_ARRAY3D_LAYERED to enable creation of layered CUDA arrays. If this flag is set, - * \p Depth specifies the number of layers, not the depth of a 3D array. - * - ::CUDA_ARRAY3D_SURFACE_LDST to enable surface references to be bound to the CUDA array. - * If this flag is not set, ::cuSurfRefSetArray will fail when attempting to bind the CUDA array - * to a surface reference. - * - ::CUDA_ARRAY3D_CUBEMAP to enable creation of cubemaps. If this flag is set, \p Width must be - * equal to \p Height, and \p Depth must be six. If the ::CUDA_ARRAY3D_LAYERED flag is also set, - * then \p Depth must be a multiple of six. - * - ::CUDA_ARRAY3D_TEXTURE_GATHER to indicate that the CUDA array will be used for texture gather. - * Texture gather can only be performed on 2D CUDA arrays. - * - * \p Width, \p Height and \p Depth must meet certain size requirements as listed in the following table. - * All values are specified in elements. Note that for brevity's sake, the full name of the device attribute - * is not specified. For ex., TEXTURE1D_WIDTH refers to the device attribute - * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH. - * - * Note that 2D CUDA arrays have different size requirements if the ::CUDA_ARRAY3D_TEXTURE_GATHER flag - * is set. \p Width and \p Height must not be greater than ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH - * and ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT respectively, in that case. - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - *
- * CUDA array type | Valid extents that must always be met              | Valid extents with CUDA_ARRAY3D_SURFACE_LDST set
- *                 | {(width range in elements), (height range),        | {(width range in elements), (height range),
- *                 |  (depth range)}                                     |  (depth range)}
- * ----------------+-----------------------------------------------------+--------------------------------------------------
- * 1D              | { (1,TEXTURE1D_WIDTH), 0, 0 }                       | { (1,SURFACE1D_WIDTH), 0, 0 }
- * 2D              | { (1,TEXTURE2D_WIDTH), (1,TEXTURE2D_HEIGHT), 0 }    | { (1,SURFACE2D_WIDTH), (1,SURFACE2D_HEIGHT), 0 }
- * 3D              | { (1,TEXTURE3D_WIDTH), (1,TEXTURE3D_HEIGHT),        | { (1,SURFACE3D_WIDTH), (1,SURFACE3D_HEIGHT),
- *                 |   (1,TEXTURE3D_DEPTH) }                             |   (1,SURFACE3D_DEPTH) }
- *                 | OR                                                  |
- *                 | { (1,TEXTURE3D_WIDTH_ALTERNATE),                    |
- *                 |   (1,TEXTURE3D_HEIGHT_ALTERNATE),                   |
- *                 |   (1,TEXTURE3D_DEPTH_ALTERNATE) }                   |
- * 1D Layered      | { (1,TEXTURE1D_LAYERED_WIDTH), 0,                   | { (1,SURFACE1D_LAYERED_WIDTH), 0,
- *                 |   (1,TEXTURE1D_LAYERED_LAYERS) }                    |   (1,SURFACE1D_LAYERED_LAYERS) }
- * 2D Layered      | { (1,TEXTURE2D_LAYERED_WIDTH),                      | { (1,SURFACE2D_LAYERED_WIDTH),
- *                 |   (1,TEXTURE2D_LAYERED_HEIGHT),                     |   (1,SURFACE2D_LAYERED_HEIGHT),
- *                 |   (1,TEXTURE2D_LAYERED_LAYERS) }                    |   (1,SURFACE2D_LAYERED_LAYERS) }
- * Cubemap         | { (1,TEXTURECUBEMAP_WIDTH),                         | { (1,SURFACECUBEMAP_WIDTH),
- *                 |   (1,TEXTURECUBEMAP_WIDTH), 6 }                     |   (1,SURFACECUBEMAP_WIDTH), 6 }
- * Cubemap Layered | { (1,TEXTURECUBEMAP_LAYERED_WIDTH),                 | { (1,SURFACECUBEMAP_LAYERED_WIDTH),
- *                 |   (1,TEXTURECUBEMAP_LAYERED_WIDTH),                 |   (1,SURFACECUBEMAP_LAYERED_WIDTH),
- *                 |   (1,TEXTURECUBEMAP_LAYERED_LAYERS) }               |   (1,SURFACECUBEMAP_LAYERED_LAYERS) }
- * - * Here are examples of CUDA array descriptions: - * - * Description for a CUDA array of 2048 floats: - * \code - CUDA_ARRAY3D_DESCRIPTOR desc; - desc.Format = CU_AD_FORMAT_FLOAT; - desc.NumChannels = 1; - desc.Width = 2048; - desc.Height = 0; - desc.Depth = 0; - * \endcode - * - * Description for a 64 x 64 CUDA array of floats: - * \code - CUDA_ARRAY3D_DESCRIPTOR desc; - desc.Format = CU_AD_FORMAT_FLOAT; - desc.NumChannels = 1; - desc.Width = 64; - desc.Height = 64; - desc.Depth = 0; - * \endcode - * - * Description for a \p width x \p height x \p depth CUDA array of 64-bit, - * 4x16-bit float16's: - * \code - CUDA_ARRAY3D_DESCRIPTOR desc; - desc.FormatFlags = CU_AD_FORMAT_HALF; - desc.NumChannels = 4; - desc.Width = width; - desc.Height = height; - desc.Depth = depth; - * \endcode - * - * \param pHandle - Returned array - * \param pAllocateArray - 3D array descriptor - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_OUT_OF_MEMORY, - * ::CUDA_ERROR_UNKNOWN - * \notefnerr - * - * \sa ::cuArray3DGetDescriptor, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cudaMalloc3DArray - */ -CUresult CUDAAPI cuArray3DCreate(CUarray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR *pAllocateArray); - -/** - * \brief Get a 3D CUDA array descriptor - * - * Returns in \p *pArrayDescriptor a descriptor containing information on the - * format and dimensions of the CUDA array \p hArray. It is useful for - * subroutines that have been passed a CUDA array, but need to know the CUDA - * array parameters for validation or other purposes. - * - * This function may be called on 1D and 2D arrays, in which case the \p Height - * and/or \p Depth members of the descriptor struct will be set to 0. 
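- *
- * For illustration (a minimal sketch, error checking omitted), a 3D array can
- * be created and its descriptor queried back:
- * \code
-     CUDA_ARRAY3D_DESCRIPTOR desc = {0};
-     desc.Format      = CU_AD_FORMAT_FLOAT;
-     desc.NumChannels = 1;
-     desc.Width       = 64;
-     desc.Height      = 64;
-     desc.Depth       = 16;
-
-     CUarray arr;
-     cuArray3DCreate(&arr, &desc);
-
-     CUDA_ARRAY3D_DESCRIPTOR query;
-     cuArray3DGetDescriptor(&query, arr);       // query.Width == 64, query.Depth == 16
-     cuArrayDestroy(arr);
- * \endcode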
- * - * \param pArrayDescriptor - Returned 3D array descriptor - * \param hArray - 3D array to get descriptor of - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_CONTEXT_IS_DESTROYED - * \notefnerr - * - * \sa ::cuArray3DCreate, ::cuArrayCreate, - * ::cuArrayDestroy, ::cuArrayGetDescriptor, ::cuMemAlloc, ::cuMemAllocHost, - * ::cuMemAllocPitch, ::cuMemcpy2D, ::cuMemcpy2DAsync, ::cuMemcpy2DUnaligned, - * ::cuMemcpy3D, ::cuMemcpy3DAsync, ::cuMemcpyAtoA, ::cuMemcpyAtoD, - * ::cuMemcpyAtoH, ::cuMemcpyAtoHAsync, ::cuMemcpyDtoA, ::cuMemcpyDtoD, ::cuMemcpyDtoDAsync, - * ::cuMemcpyDtoH, ::cuMemcpyDtoHAsync, ::cuMemcpyHtoA, ::cuMemcpyHtoAAsync, - * ::cuMemcpyHtoD, ::cuMemcpyHtoDAsync, ::cuMemFree, ::cuMemFreeHost, - * ::cuMemGetAddressRange, ::cuMemGetInfo, ::cuMemHostAlloc, - * ::cuMemHostGetDevicePointer, ::cuMemsetD2D8, ::cuMemsetD2D16, - * ::cuMemsetD2D32, ::cuMemsetD8, ::cuMemsetD16, ::cuMemsetD32, - * ::cudaArrayGetInfo - */ -CUresult CUDAAPI cuArray3DGetDescriptor(CUDA_ARRAY3D_DESCRIPTOR *pArrayDescriptor, CUarray hArray); - -/** - * \brief Creates a CUDA mipmapped array - * - * Creates a CUDA mipmapped array according to the ::CUDA_ARRAY3D_DESCRIPTOR structure - * \p pMipmappedArrayDesc and returns a handle to the new CUDA mipmapped array in \p *pHandle. - * \p numMipmapLevels specifies the number of mipmap levels to be allocated. This value is - * clamped to the range [1, 1 + floor(log2(max(width, height, depth)))]. - * - * The ::CUDA_ARRAY3D_DESCRIPTOR is defined as: - * - * \code - typedef struct { - unsigned int Width; - unsigned int Height; - unsigned int Depth; - CUarray_format Format; - unsigned int NumChannels; - unsigned int Flags; - } CUDA_ARRAY3D_DESCRIPTOR; - * \endcode - * where: - * - * - \p Width, \p Height, and \p Depth are the width, height, and depth of the - * CUDA array (in elements); the following types of CUDA arrays can be allocated: - * - A 1D mipmapped array is allocated if \p Height and \p Depth extents are both zero. - * - A 2D mipmapped array is allocated if only \p Depth extent is zero. - * - A 3D mipmapped array is allocated if all three extents are non-zero. - * - A 1D layered CUDA mipmapped array is allocated if only \p Height is zero and the - * ::CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 1D array. The number - * of layers is determined by the depth extent. - * - A 2D layered CUDA mipmapped array is allocated if all three extents are non-zero and - * the ::CUDA_ARRAY3D_LAYERED flag is set. Each layer is a 2D array. The number - * of layers is determined by the depth extent. - * - A cubemap CUDA mipmapped array is allocated if all three extents are non-zero and the - * ::CUDA_ARRAY3D_CUBEMAP flag is set. \p Width must be equal to \p Height, and - * \p Depth must be six. A cubemap is a special type of 2D layered CUDA array, - * where the six layers represent the six faces of a cube. The order of the six - * layers in memory is the same as that listed in ::CUarray_cubemap_face. - * - A cubemap layered CUDA mipmapped array is allocated if all three extents are non-zero, - * and both, ::CUDA_ARRAY3D_CUBEMAP and ::CUDA_ARRAY3D_LAYERED flags are set. - * \p Width must be equal to \p Height, and \p Depth must be a multiple of six. - * A cubemap layered CUDA array is a special type of 2D layered CUDA array that - * consists of a collection of cubemaps. 
The first six layers represent the first - * cubemap, the next six layers form the second cubemap, and so on. - * - * - ::Format specifies the format of the elements; ::CUarray_format is - * defined as: - * \code - typedef enum CUarray_format_enum { - CU_AD_FORMAT_UNSIGNED_INT8 = 0x01, - CU_AD_FORMAT_UNSIGNED_INT16 = 0x02, - CU_AD_FORMAT_UNSIGNED_INT32 = 0x03, - CU_AD_FORMAT_SIGNED_INT8 = 0x08, - CU_AD_FORMAT_SIGNED_INT16 = 0x09, - CU_AD_FORMAT_SIGNED_INT32 = 0x0a, - CU_AD_FORMAT_HALF = 0x10, - CU_AD_FORMAT_FLOAT = 0x20 - } CUarray_format; - * \endcode - * - * - \p NumChannels specifies the number of packed components per CUDA array - * element; it may be 1, 2, or 4; - * - * - ::Flags may be set to - * - ::CUDA_ARRAY3D_LAYERED to enable creation of layered CUDA mipmapped arrays. If this flag is set, - * \p Depth specifies the number of layers, not the depth of a 3D array. - * - ::CUDA_ARRAY3D_SURFACE_LDST to enable surface references to be bound to individual mipmap levels of - * the CUDA mipmapped array. If this flag is not set, ::cuSurfRefSetArray will fail when attempting to - * bind a mipmap level of the CUDA mipmapped array to a surface reference. - * - ::CUDA_ARRAY3D_CUBEMAP to enable creation of mipmapped cubemaps. If this flag is set, \p Width must be - * equal to \p Height, and \p Depth must be six. If the ::CUDA_ARRAY3D_LAYERED flag is also set, - * then \p Depth must be a multiple of six. - * - ::CUDA_ARRAY3D_TEXTURE_GATHER to indicate that the CUDA mipmapped array will be used for texture gather. - * Texture gather can only be performed on 2D CUDA mipmapped arrays. - * - * \p Width, \p Height and \p Depth must meet certain size requirements as listed in the following table. - * All values are specified in elements. Note that for brevity's sake, the full name of the device attribute - * is not specified. For ex., TEXTURE1D_MIPMAPPED_WIDTH refers to the device attribute - * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH. - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - *
- * CUDA array type | Valid extents that must always be met              | Valid extents with CUDA_ARRAY3D_SURFACE_LDST set
- *                 | {(width range in elements), (height range),        | {(width range in elements), (height range),
- *                 |  (depth range)}                                     |  (depth range)}
- * ----------------+-----------------------------------------------------+--------------------------------------------------
- * 1D              | { (1,TEXTURE1D_MIPMAPPED_WIDTH), 0, 0 }             | { (1,SURFACE1D_WIDTH), 0, 0 }
- * 2D              | { (1,TEXTURE2D_MIPMAPPED_WIDTH),                    | { (1,SURFACE2D_WIDTH),
- *                 |   (1,TEXTURE2D_MIPMAPPED_HEIGHT), 0 }               |   (1,SURFACE2D_HEIGHT), 0 }
- * 3D              | { (1,TEXTURE3D_WIDTH), (1,TEXTURE3D_HEIGHT),        | { (1,SURFACE3D_WIDTH), (1,SURFACE3D_HEIGHT),
- *                 |   (1,TEXTURE3D_DEPTH) }                             |   (1,SURFACE3D_DEPTH) }
- *                 | OR                                                  |
- *                 | { (1,TEXTURE3D_WIDTH_ALTERNATE),                    |
- *                 |   (1,TEXTURE3D_HEIGHT_ALTERNATE),                   |
- *                 |   (1,TEXTURE3D_DEPTH_ALTERNATE) }                   |
- * 1D Layered      | { (1,TEXTURE1D_LAYERED_WIDTH), 0,                   | { (1,SURFACE1D_LAYERED_WIDTH), 0,
- *                 |   (1,TEXTURE1D_LAYERED_LAYERS) }                    |   (1,SURFACE1D_LAYERED_LAYERS) }
- * 2D Layered      | { (1,TEXTURE2D_LAYERED_WIDTH),                      | { (1,SURFACE2D_LAYERED_WIDTH),
- *                 |   (1,TEXTURE2D_LAYERED_HEIGHT),                     |   (1,SURFACE2D_LAYERED_HEIGHT),
- *                 |   (1,TEXTURE2D_LAYERED_LAYERS) }                    |   (1,SURFACE2D_LAYERED_LAYERS) }
- * Cubemap         | { (1,TEXTURECUBEMAP_WIDTH),                         | { (1,SURFACECUBEMAP_WIDTH),
- *                 |   (1,TEXTURECUBEMAP_WIDTH), 6 }                     |   (1,SURFACECUBEMAP_WIDTH), 6 }
- * Cubemap Layered | { (1,TEXTURECUBEMAP_LAYERED_WIDTH),                 | { (1,SURFACECUBEMAP_LAYERED_WIDTH),
- *                 |   (1,TEXTURECUBEMAP_LAYERED_WIDTH),                 |   (1,SURFACECUBEMAP_LAYERED_WIDTH),
- *                 |   (1,TEXTURECUBEMAP_LAYERED_LAYERS) }               |   (1,SURFACECUBEMAP_LAYERED_LAYERS) }
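- *
- * For illustration (a minimal sketch, error checking omitted), a 2D mipmapped
- * array and its finest level can be obtained as follows:
- * \code
-     CUDA_ARRAY3D_DESCRIPTOR desc = {0};
-     desc.Format      = CU_AD_FORMAT_FLOAT;
-     desc.NumChannels = 1;
-     desc.Width       = 256;
-     desc.Height      = 256;
-     desc.Depth       = 0;                      // 2D mipmapped array
-
-     CUmipmappedArray mip;
-     cuMipmappedArrayCreate(&mip, &desc, 9);    // 9 == 1 + floor(log2(256)), the maximum level count
-
-     CUarray level0;
-     cuMipmappedArrayGetLevel(&level0, mip, 0); // finest mip level
-     /* ... */
-     cuMipmappedArrayDestroy(mip);
- * \endcode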
- * - * - * \param pHandle - Returned mipmapped array - * \param pMipmappedArrayDesc - mipmapped array descriptor - * \param numMipmapLevels - Number of mipmap levels - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_OUT_OF_MEMORY, - * ::CUDA_ERROR_UNKNOWN - * \notefnerr - * - * \sa - * ::cuMipmappedArrayDestroy, - * ::cuMipmappedArrayGetLevel, - * ::cuArrayCreate, - * ::cudaMallocMipmappedArray - */ -CUresult CUDAAPI cuMipmappedArrayCreate(CUmipmappedArray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR *pMipmappedArrayDesc, unsigned int numMipmapLevels); - -/** - * \brief Gets a mipmap level of a CUDA mipmapped array - * - * Returns in \p *pLevelArray a CUDA array that represents a single mipmap level - * of the CUDA mipmapped array \p hMipmappedArray. - * - * If \p level is greater than the maximum number of levels in this mipmapped array, - * ::CUDA_ERROR_INVALID_VALUE is returned. - * - * \param pLevelArray - Returned mipmap level CUDA array - * \param hMipmappedArray - CUDA mipmapped array - * \param level - Mipmap level - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE - * \notefnerr - * - * \sa - * ::cuMipmappedArrayCreate, - * ::cuMipmappedArrayDestroy, - * ::cuArrayCreate, - * ::cudaGetMipmappedArrayLevel - */ -CUresult CUDAAPI cuMipmappedArrayGetLevel(CUarray *pLevelArray, CUmipmappedArray hMipmappedArray, unsigned int level); - -/** - * \brief Destroys a CUDA mipmapped array - * - * Destroys the CUDA mipmapped array \p hMipmappedArray. - * - * \param hMipmappedArray - Mipmapped array to destroy - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_ARRAY_IS_MAPPED, - * ::CUDA_ERROR_CONTEXT_IS_DESTROYED - * \notefnerr - * - * \sa - * ::cuMipmappedArrayCreate, - * ::cuMipmappedArrayGetLevel, - * ::cuArrayCreate, - * ::cudaFreeMipmappedArray - */ -CUresult CUDAAPI cuMipmappedArrayDestroy(CUmipmappedArray hMipmappedArray); - -/** @} */ /* END CUDA_MEM */ - -/** - * \defgroup CUDA_VA Virtual Memory Management - * - * ___MANBRIEF___ virtual memory management functions of the low-level CUDA driver API - * (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the virtual memory management functions of the low-level CUDA - * driver application programming interface. - * - * @{ - */ - -/** -* \brief Allocate an address range reservation. -* -* Reserves a virtual address range based on the given parameters, giving -* the starting address of the range in \p ptr. This API requires a system that -* supports UVA. The size and address parameters must be a multiple of the -* host page size and the alignment must be a power of two or zero for default -* alignment. 
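-*
-* A reservation is normally combined with ::cuMemCreate, ::cuMemMap and
-* ::cuMemSetAccess. The following is a minimal sketch of that flow (error
-* checking omitted; the ::CUmemAllocationProp / ::CUmemAccessDesc member names
-* and the ::cuMemSetAccess call are assumptions based on the surrounding API,
-* and N is a placeholder):
-* \code
-    CUmemAllocationProp prop = {0};
-    prop.type          = CU_MEM_ALLOCATION_TYPE_PINNED;
-    prop.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
-    prop.location.id   = 0;                          // device ordinal
-
-    size_t gran;
-    cuMemGetAllocationGranularity(&gran, &prop, CU_MEM_ALLOC_GRANULARITY_MINIMUM);
-    size_t size = N * gran;                          // must be a multiple of the granularity
-
-    CUdeviceptr ptr;
-    cuMemAddressReserve(&ptr, size, 0, 0, 0);        // default alignment, no fixed address
-
-    CUmemGenericAllocationHandle handle;
-    cuMemCreate(&handle, size, &prop, 0);
-    cuMemMap(ptr, size, 0, handle, 0);               // offset must currently be zero
-
-    CUmemAccessDesc access = {0};
-    access.location = prop.location;
-    access.flags    = CU_MEM_ACCESS_FLAGS_PROT_READWRITE;
-    cuMemSetAccess(ptr, size, &access, 1);           // make the mapped range accessible
-
-    /* ... use ptr ... */
-
-    cuMemUnmap(ptr, size);
-    cuMemRelease(handle);
-    cuMemAddressFree(ptr, size);
-* \endcode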
-* -* \param[out] ptr - Resulting pointer to start of virtual address range allocated -* \param[in] size - Size of the reserved virtual address range requested -* \param[in] alignment - Alignment of the reserved virtual address range requested -* \param[in] addr - Fixed starting address range requested -* \param[in] flags - Currently unused, must be zero -* \return -* ::CUDA_SUCCESS, -* ::CUDA_ERROR_INVALID_VALUE, -* ::CUDA_ERROR_OUT_OF_MEMORY, -* ::CUDA_ERROR_NOT_INITIALIZED, -* ::CUDA_ERROR_DEINITIALIZED, -* ::CUDA_ERROR_NOT_PERMITTED, -* ::CUDA_ERROR_NOT_SUPPORTED -* -* \sa ::cuMemAddressFree -*/ -CUresult CUDAAPI cuMemAddressReserve(CUdeviceptr *ptr, size_t size, size_t alignment, CUdeviceptr addr, unsigned long long flags); - -/** -* \brief Free an address range reservation. -* -* Frees a virtual address range reserved by cuMemAddressReserve. The size -* must match what was given to memAddressReserve and the ptr given must -* match what was returned from memAddressReserve. -* -* \param[in] ptr - Starting address of the virtual address range to free -* \param[in] size - Size of the virtual address region to free -* \return -* ::CUDA_SUCCESS, -* ::CUDA_ERROR_INVALID_VALUE, -* ::CUDA_ERROR_NOT_INITIALIZED, -* ::CUDA_ERROR_DEINITIALIZED, -* ::CUDA_ERROR_NOT_PERMITTED, -* ::CUDA_ERROR_NOT_SUPPORTED -* -* \sa ::cuMemAddressReserve -*/ -CUresult CUDAAPI cuMemAddressFree(CUdeviceptr ptr, size_t size); - -/** -* \brief Create a CUDA memory handle representing a memory allocation of a given size described by the given properties -* -* This creates a memory allocation on the target device specified through the -* \p prop structure. The created allocation will not have any device or host -* mappings. The generic memory \p handle for the allocation can be -* mapped to the address space of calling process via ::cuMemMap. This handle -* cannot be transmitted directly to other processes (see -* ::cuMemExportToShareableHandle). On Windows, the caller must also pass -* an LPSECURITYATTRIBUTE in \p prop to be associated with this handle which -* limits or allows access to this handle for a recipient process (see -* ::CUmemAllocationProp::win32HandleMetaData for more). The \p size of this -* allocation must be a multiple of the the value given via -* ::cuMemGetAllocationGranularity with the ::CU_MEM_ALLOC_GRANULARITY_MINIMUM -* flag. -* If ::CUmemAllocationProp::allocFlags::usage contains ::CU_MEM_CREATE_USAGE_TILE_POOL flag then -* the memory allocation is intended only to be used as backing tile pool for sparse CUDA arrays -* and sparse CUDA mipmapped arrays. -* (see ::cuMemMapArrayAsync). -* -* \param[out] handle - Value of handle returned. All operations on this allocation are to be performed using this handle. -* \param[in] size - Size of the allocation requested -* \param[in] prop - Properties of the allocation to create. -* \param[in] flags - flags for future use, must be zero now. -* \return -* ::CUDA_SUCCESS, -* ::CUDA_ERROR_INVALID_VALUE, -* ::CUDA_ERROR_OUT_OF_MEMORY, -* ::CUDA_ERROR_INVALID_DEVICE, -* ::CUDA_ERROR_NOT_INITIALIZED, -* ::CUDA_ERROR_DEINITIALIZED, -* ::CUDA_ERROR_NOT_PERMITTED, -* ::CUDA_ERROR_NOT_SUPPORTED -* \notefnerr -* -* \sa ::cuMemRelease, ::cuMemExportToShareableHandle, ::cuMemImportFromShareableHandle -*/ -CUresult CUDAAPI cuMemCreate(CUmemGenericAllocationHandle *handle, size_t size, const CUmemAllocationProp *prop, unsigned long long flags); - -/** -* \brief Release a memory handle representing a memory allocation which was previously allocated through cuMemCreate. 
-* -* Frees the memory that was allocated on a device through cuMemCreate. -* -* The memory allocation will be freed when all outstanding mappings to the memory -* are unmapped and when all outstanding references to the handle (including it's -* shareable counterparts) are also released. The generic memory handle can be -* freed when there are still outstanding mappings made with this handle. Each -* time a recipient process imports a shareable handle, it needs to pair it with -* ::cuMemRelease for the handle to be freed. If \p handle is not a valid handle -* the behavior is undefined. -* -* \param[in] handle Value of handle which was returned previously by cuMemCreate. -* \return -* ::CUDA_SUCCESS, -* ::CUDA_ERROR_INVALID_VALUE, -* ::CUDA_ERROR_NOT_INITIALIZED, -* ::CUDA_ERROR_DEINITIALIZED, -* ::CUDA_ERROR_NOT_PERMITTED, -* ::CUDA_ERROR_NOT_SUPPORTED -* \notefnerr -* -* \sa ::cuMemCreate -*/ -CUresult CUDAAPI cuMemRelease(CUmemGenericAllocationHandle handle); - -/** -* \brief Maps an allocation handle to a reserved virtual address range. -* -* Maps bytes of memory represented by \p handle starting from byte \p offset to -* \p size to address range [\p addr, \p addr + \p size]. This range must be an -* address reservation previously reserved with ::cuMemAddressReserve, and -* \p offset + \p size must be less than the size of the memory allocation. -* Both \p ptr, \p size, and \p offset must be a multiple of the value given via -* ::cuMemGetAllocationGranularity with the ::CU_MEM_ALLOC_GRANULARITY_MINIMUM flag. -* -* Please note calling ::cuMemMap does not make the address accessible, -* the caller needs to update accessibility of a contiguous mapped VA -* range by calling ::cuMemSetAccess. -* -* Once a recipient process obtains a shareable memory handle -* from ::cuMemImportFromShareableHandle, the process must -* use ::cuMemMap to map the memory into its address ranges before -* setting accessibility with ::cuMemSetAccess. -* -* ::cuMemMap can only create mappings on VA range reservations -* that are not currently mapped. -* -* \param[in] ptr - Address where memory will be mapped. -* \param[in] size - Size of the memory mapping. -* \param[in] offset - Offset into the memory represented by -* - \p handle from which to start mapping -* - Note: currently must be zero. -* \param[in] handle - Handle to a shareable memory -* \param[in] flags - flags for future use, must be zero now. -* \return -* ::CUDA_SUCCESS, -* ::CUDA_ERROR_INVALID_VALUE, -* ::CUDA_ERROR_INVALID_DEVICE, -* ::CUDA_ERROR_OUT_OF_MEMORY, -* ::CUDA_ERROR_NOT_INITIALIZED, -* ::CUDA_ERROR_DEINITIALIZED, -* ::CUDA_ERROR_NOT_PERMITTED, -* ::CUDA_ERROR_NOT_SUPPORTED -* \notefnerr -* -* \sa ::cuMemUnmap, ::cuMemSetAccess, ::cuMemCreate, ::cuMemAddressReserve, ::cuMemImportFromShareableHandle -*/ -CUresult CUDAAPI cuMemMap(CUdeviceptr ptr, size_t size, size_t offset, CUmemGenericAllocationHandle handle, unsigned long long flags); - -/** - * \brief Maps or unmaps subregions of sparse CUDA arrays and sparse CUDA mipmapped arrays - * - * Performs map or unmap operations on subregions of sparse CUDA arrays and sparse CUDA mipmapped arrays. - * Each operation is specified by a ::CUarrayMapInfo entry in the \p mapInfoList array of size \p count. 
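Mapping and tear-down complete the workflow started above. The sketch below is illustrative only; it relies on ::cuMemSetAccess and ::cuMemUnmap, which are documented further below, and on cuMemsetD8 from elsewhere in the driver API. Error handling is omitted.

\code
#include <cuda.h>

/* Map the handle into the reserved range, grant access, use it, then tear down. */
static void map_use_unmap(CUdeviceptr va, CUmemGenericAllocationHandle handle, size_t size)
{
    cuMemMap(va, size, 0 /* offset, currently must be zero */, handle, 0);

    /* Per the notes above, the handle may be released while mappings are outstanding;
     * the backing memory stays alive until the mapping is unmapped. */
    cuMemRelease(handle);

    CUmemAccessDesc access = {0};
    access.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
    access.location.id   = 0;
    access.flags         = CU_MEM_ACCESS_FLAGS_PROT_READWRITE;
    cuMemSetAccess(va, size, &access, 1);

    cuMemsetD8(va, 0, size);          /* the range now behaves like any device pointer */

    cuMemUnmap(va, size);
    cuMemAddressFree(va, size);
}
\endcode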
- * The structure ::CUarrayMapInfo is defined as follow: - \code - typedef struct CUarrayMapInfo_st { - CUresourcetype resourceType; - union { - CUmipmappedArray mipmap; - CUarray array; - } resource; - - CUarraySparseSubresourceType subresourceType; - union { - struct { - unsigned int level; - unsigned int layer; - unsigned int offsetX; - unsigned int offsetY; - unsigned int offsetZ; - unsigned int extentWidth; - unsigned int extentHeight; - unsigned int extentDepth; - } sparseLevel; - struct { - unsigned int layer; - unsigned long long offset; - unsigned long long size; - } miptail; - } subresource; - - CUmemOperationType memOperationType; - - CUmemHandleType memHandleType; - union { - CUmemGenericAllocationHandle memHandle; - } memHandle; - - unsigned long long offset; - unsigned int deviceBitMask; - unsigned int flags; - unsigned int reserved[2]; - } CUarrayMapInfo; - \endcode - * - * where ::CUarrayMapInfo::resourceType specifies the type of resource to be operated on. - * If ::CUarrayMapInfo::resourceType is set to ::CUresourcetype::CU_RESOURCE_TYPE_ARRAY then - * ::CUarrayMapInfo::resource::array must be set to a valid sparse CUDA array handle. - * The CUDA array must be either a 2D, 2D layered or 3D CUDA array and must have been allocated using - * ::cuArrayCreate or ::cuArray3DCreate with the flag ::CUDA_ARRAY3D_SPARSE. - * For CUDA arrays obtained using ::cuMipmappedArrayGetLevel, ::CUDA_ERROR_INVALID_VALUE will be returned. - * If ::CUarrayMapInfo::resourceType is set to ::CUresourcetype::CU_RESOURCE_TYPE_MIPMAPPED_ARRAY - * then ::CUarrayMapInfo::resource::mipmap must be set to a valid sparse CUDA mipmapped array handle. - * The CUDA mipmapped array must be either a 2D, 2D layered or 3D CUDA mipmapped array and must have been - * allocated using ::cuMipmappedArrayCreate with the flag ::CUDA_ARRAY3D_SPARSE. - * - * ::CUarrayMapInfo::subresourceType specifies the type of subresource within the resource. - * ::CUarraySparseSubresourceType_enum is defined as: - \code - typedef enum CUarraySparseSubresourceType_enum { - CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL = 0, - CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL = 1 - } CUarraySparseSubresourceType; - \endcode - * - * where ::CUarraySparseSubresourceType::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL indicates a - * sparse-miplevel which spans at least one tile in every dimension. The remaining miplevels which - * are too small to span at least one tile in any dimension constitute the mip tail region as indicated by - * ::CUarraySparseSubresourceType::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL subresource type. - * - * If ::CUarrayMapInfo::subresourceType is set to ::CUarraySparseSubresourceType::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL - * then ::CUarrayMapInfo::subresource::sparseLevel struct must contain valid array subregion offsets and extents. - * The ::CUarrayMapInfo::subresource::sparseLevel::offsetX, ::CUarrayMapInfo::subresource::sparseLevel::offsetY - * and ::CUarrayMapInfo::subresource::sparseLevel::offsetZ must specify valid X, Y and Z offsets respectively. - * The ::CUarrayMapInfo::subresource::sparseLevel::extentWidth, ::CUarrayMapInfo::subresource::sparseLevel::extentHeight - * and ::CUarrayMapInfo::subresource::sparseLevel::extentDepth must specify valid width, height and depth extents respectively. - * These offsets and extents must be aligned to the corresponding tile dimension. - * For CUDA mipmapped arrays ::CUarrayMapInfo::subresource::sparseLevel::level must specify a valid mip level index. 
Otherwise, - * must be zero. - * For layered CUDA arrays and layered CUDA mipmapped arrays ::CUarrayMapInfo::subresource::sparseLevel::layer must specify a valid layer index. Otherwise, - * must be zero. - * ::CUarrayMapInfo::subresource::sparseLevel::offsetZ must be zero and ::CUarrayMapInfo::subresource::sparseLevel::extentDepth - * must be set to 1 for 2D and 2D layered CUDA arrays and CUDA mipmapped arrays. - * Tile extents can be obtained by calling ::cuArrayGetSparseProperties and ::cuMipmappedArrayGetSparseProperties - * - * If ::CUarrayMapInfo::subresourceType is set to ::CUarraySparseSubresourceType::CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_MIPTAIL - * then ::CUarrayMapInfo::subresource::miptail struct must contain valid mip tail offset in - * ::CUarrayMapInfo::subresource::miptail::offset and size in ::CUarrayMapInfo::subresource::miptail::size. - * Both, mip tail offset and mip tail size must be aligned to the tile size. - * For layered CUDA mipmapped arrays which don't have the flag ::CU_ARRAY_SPARSE_PROPERTIES_SINGLE_MIPTAIL set in ::CUDA_ARRAY_SPARSE_PROPERTIES::flags - * as returned by ::cuMipmappedArrayGetSparseProperties, ::CUarrayMapInfo::subresource::miptail::layer must specify a valid layer index. - * Otherwise, must be zero. - * - * ::CUarrayMapInfo::memOperationType specifies the type of operation. ::CUmemOperationType is defined as: - \code - typedef enum CUmemOperationType_enum { - CU_MEM_OPERATION_TYPE_MAP = 1, - CU_MEM_OPERATION_TYPE_UNMAP = 2 - } CUmemOperationType; - \endcode - * If ::CUarrayMapInfo::memOperationType is set to ::CUmemOperationType::CU_MEM_OPERATION_TYPE_MAP then the subresource - * will be mapped onto the tile pool memory specified by ::CUarrayMapInfo::memHandle at offset ::CUarrayMapInfo::offset. - * The tile pool allocation has to be created by specifying the ::CU_MEM_CREATE_USAGE_TILE_POOL flag when calling ::cuMemCreate. Also, - * ::CUarrayMapInfo::memHandleType must be set to ::CUmemHandleType::CU_MEM_HANDLE_TYPE_GENERIC. - * - * If ::CUarrayMapInfo::memOperationType is set to ::CUmemOperationType::CU_MEM_OPERATION_TYPE_UNMAP then an unmapping operation - * is performed. ::CUarrayMapInfo::memHandle must be NULL. - * - * ::CUarrayMapInfo::deviceBitMask specifies the list of devices that must map or unmap physical memory. - * Currently, this mask must have exactly one bit set, and the corresponding device must match the device associated with the stream. - * If ::CUarrayMapInfo::memOperationType is set to ::CUmemOperationType::CU_MEM_OPERATION_TYPE_MAP, the device must also match - * the device associated with the tile pool memory allocation as specified by ::CUarrayMapInfo::memHandle. - * - * ::CUarrayMapInfo::flags and ::CUarrayMapInfo::reserved[] are unused and must be set to zero. - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE - * - * \param[in] mapInfoList - List of ::CUarrayMapInfo - * \param[in] count - Count of ::CUarrayMapInfo in \p mapInfoList - * \param[in] hStream - Stream identifier for the stream to use for map or unmap operations - * - * \sa ::cuMipmappedArrayCreate, ::cuArrayCreate, ::cuArray3DCreate, ::cuMemCreate, ::cuArrayGetSparseProperties, ::cuMipmappedArrayGetSparseProperties - */ -CUresult CUDAAPI cuMemMapArrayAsync(CUarrayMapInfo *mapInfoList, unsigned int count, CUstream hStream); - -/** -* \brief Unmap the backing memory of a given address range. -* -* The range must be the entire contiguous address range that was mapped to. 
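A hedged sketch of a single map operation on a (non-layered, 2D) sparse CUDA array, populating ::CUarrayMapInfo exactly as described above. The tile extents are assumed to come from ::cuArrayGetSparseProperties, the tile-pool handle from a ::cuMemCreate call with ::CU_MEM_CREATE_USAGE_TILE_POOL, and device 0 is assumed to be the stream's device; error handling is omitted.

\code
#include <cuda.h>
#include <string.h>

/* Map the first tile of mip level 0 of a sparse CUDA array onto a tile-pool allocation. */
static void map_first_tile(CUarray sparseArray, CUmemGenericAllocationHandle tilePool,
                           unsigned int tileW, unsigned int tileH, CUstream stream)
{
    CUarrayMapInfo info;
    memset(&info, 0, sizeof(info));          /* unused fields (layer, flags, reserved) must be zero */

    info.resourceType   = CU_RESOURCE_TYPE_ARRAY;
    info.resource.array = sparseArray;

    info.subresourceType = CU_ARRAY_SPARSE_SUBRESOURCE_TYPE_SPARSE_LEVEL;
    info.subresource.sparseLevel.level        = 0;
    info.subresource.sparseLevel.offsetX      = 0;       /* offsets must be tile-aligned */
    info.subresource.sparseLevel.offsetY      = 0;
    info.subresource.sparseLevel.extentWidth  = tileW;
    info.subresource.sparseLevel.extentHeight = tileH;
    info.subresource.sparseLevel.extentDepth  = 1;       /* required for 2D arrays */

    info.memOperationType    = CU_MEM_OPERATION_TYPE_MAP;
    info.memHandleType       = CU_MEM_HANDLE_TYPE_GENERIC;
    info.memHandle.memHandle = tilePool;
    info.offset              = 0;                        /* offset into the tile pool */
    info.deviceBitMask       = 1u << 0;                  /* exactly one bit; must match the stream's device */

    cuMemMapArrayAsync(&info, 1, stream);
}
\endcode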
In -* other words, ::cuMemUnmap cannot unmap a sub-range of an address range mapped -* by ::cuMemCreate / ::cuMemMap. Any backing memory allocations will be freed -* if there are no existing mappings and there are no unreleased memory handles. -* -* When ::cuMemUnmap returns successfully the address range is converted to an -* address reservation and can be used for a future calls to ::cuMemMap. Any new -* mapping to this virtual address will need to have access granted through -* ::cuMemSetAccess, as all mappings start with no accessibility setup. -* -* \param[in] ptr - Starting address for the virtual address range to unmap -* \param[in] size - Size of the virtual address range to unmap -* \returns -* ::CUDA_SUCCESS, -* ::CUDA_ERROR_INVALID_VALUE, -* ::CUDA_ERROR_NOT_INITIALIZED, -* ::CUDA_ERROR_DEINITIALIZED, -* ::CUDA_ERROR_NOT_PERMITTED, -* ::CUDA_ERROR_NOT_SUPPORTED -* \notefnerr -* \note_sync -* -* \sa ::cuMemCreate, ::cuMemAddressReserve -*/ -CUresult CUDAAPI cuMemUnmap(CUdeviceptr ptr, size_t size); - -/** -* \brief Set the access flags for each location specified in \p desc for the given virtual address range -* -* Given the virtual address range via \p ptr and \p size, and the locations -* in the array given by \p desc and \p count, set the access flags for the -* target locations. The range must be a fully mapped address range -* containing all allocations created by ::cuMemMap / ::cuMemCreate. -* -* \param[in] ptr - Starting address for the virtual address range -* \param[in] size - Length of the virtual address range -* \param[in] desc - Array of ::CUmemAccessDesc that describe how to change the -* - mapping for each location specified -* \param[in] count - Number of ::CUmemAccessDesc in \p desc -* \returns -* ::CUDA_SUCCESS, -* ::CUDA_ERROR_INVALID_VALUE, -* ::CUDA_ERROR_INVALID_DEVICE, -* ::CUDA_ERROR_NOT_SUPPORTED -* \notefnerr -* \note_sync -* -* \sa ::cuMemSetAccess, ::cuMemCreate, :cuMemMap -*/ -CUresult CUDAAPI cuMemSetAccess(CUdeviceptr ptr, size_t size, const CUmemAccessDesc *desc, size_t count); - -/** -* \brief Get the access \p flags set for the given \p location and \p ptr -* -* \param[out] flags - Flags set for this location -* \param[in] location - Location in which to check the flags for -* \param[in] ptr - Address in which to check the access flags for -* \returns -* ::CUDA_SUCCESS, -* ::CUDA_ERROR_INVALID_VALUE, -* ::CUDA_ERROR_INVALID_DEVICE, -* ::CUDA_ERROR_NOT_INITIALIZED, -* ::CUDA_ERROR_DEINITIALIZED, -* ::CUDA_ERROR_NOT_PERMITTED, -* ::CUDA_ERROR_NOT_SUPPORTED -* -* \sa ::cuMemSetAccess -*/ -CUresult CUDAAPI cuMemGetAccess(unsigned long long *flags, const CUmemLocation *location, CUdeviceptr ptr); - -/** -* \brief Exports an allocation to a requested shareable handle type -* -* Given a CUDA memory handle, create a shareable memory -* allocation handle that can be used to share the memory with other -* processes. The recipient process can convert the shareable handle back into a -* CUDA memory handle using ::cuMemImportFromShareableHandle and map -* it with ::cuMemMap. The implementation of what this handle is and how it -* can be transferred is defined by the requested handle type in \p handleType -* -* Once all shareable handles are closed and the allocation is released, the allocated -* memory referenced will be released back to the OS and uses of the CUDA handle afterward -* will lead to undefined behavior. -* -* This API can also be used in conjunction with other APIs (e.g. 
Vulkan, OpenGL) -* that support importing memory from the shareable type -* -* \param[out] shareableHandle - Pointer to the location in which to store the requested handle type -* \param[in] handle - CUDA handle for the memory allocation -* \param[in] handleType - Type of shareable handle requested (defines type and size of the \p shareableHandle output parameter) -* \param[in] flags - Reserved, must be zero -* \returns -* ::CUDA_SUCCESS, -* ::CUDA_ERROR_INVALID_VALUE, -* ::CUDA_ERROR_NOT_INITIALIZED, -* ::CUDA_ERROR_DEINITIALIZED, -* ::CUDA_ERROR_NOT_PERMITTED, -* ::CUDA_ERROR_NOT_SUPPORTED -* -* \sa ::cuMemImportFromShareableHandle -*/ -CUresult CUDAAPI cuMemExportToShareableHandle(void *shareableHandle, CUmemGenericAllocationHandle handle, CUmemAllocationHandleType handleType, unsigned long long flags); - -/** -* \brief Imports an allocation from a requested shareable handle type. -* -* If the current process cannot support the memory described by this shareable -* handle, this API will error as CUDA_ERROR_NOT_SUPPORTED. -* -* \note Importing shareable handles exported from some graphics APIs(VUlkan, OpenGL, etc) -* created on devices under an SLI group may not be supported, and thus this API will -* return CUDA_ERROR_NOT_SUPPORTED. -* There is no guarantee that the contents of \p handle will be the same CUDA memory handle -* for the same given OS shareable handle, or the same underlying allocation. -* -* \param[out] handle - CUDA Memory handle for the memory allocation. -* \param[in] osHandle - Shareable Handle representing the memory allocation that is to be imported. -* \param[in] shHandleType - handle type of the exported handle ::CUmemAllocationHandleType. -* \returns -* ::CUDA_SUCCESS, -* ::CUDA_ERROR_INVALID_VALUE, -* ::CUDA_ERROR_NOT_INITIALIZED, -* ::CUDA_ERROR_DEINITIALIZED, -* ::CUDA_ERROR_NOT_PERMITTED, -* ::CUDA_ERROR_NOT_SUPPORTED -* -* \sa ::cuMemExportToShareableHandle, ::cuMemMap, ::cuMemRelease -*/ -CUresult CUDAAPI cuMemImportFromShareableHandle(CUmemGenericAllocationHandle *handle, void *osHandle, CUmemAllocationHandleType shHandleType); - -/** -* \brief Calculates either the minimal or recommended granularity -* -* Calculates either the minimal or recommended granularity -* for a given allocation specification and returns it in granularity. This -* granularity can be used as a multiple for alignment, size, or address mapping. -* -* \param[out] granularity Returned granularity. 
-* \param[in] prop Property for which to determine the granularity for -* \param[in] option Determines which granularity to return -* \returns -* ::CUDA_SUCCESS, -* ::CUDA_ERROR_INVALID_VALUE, -* ::CUDA_ERROR_NOT_INITIALIZED, -* ::CUDA_ERROR_DEINITIALIZED, -* ::CUDA_ERROR_NOT_PERMITTED, -* ::CUDA_ERROR_NOT_SUPPORTED -* -* \sa ::cuMemCreate, ::cuMemMap -*/ -CUresult CUDAAPI cuMemGetAllocationGranularity(size_t *granularity, const CUmemAllocationProp *prop, CUmemAllocationGranularity_flags option); - -/** -* \brief Retrieve the contents of the property structure defining properties for this handle -* -* \param[out] prop - Pointer to a properties structure which will hold the information about this handle -* \param[in] handle - Handle which to perform the query on -* \returns -* ::CUDA_SUCCESS, -* ::CUDA_ERROR_INVALID_VALUE, -* ::CUDA_ERROR_NOT_INITIALIZED, -* ::CUDA_ERROR_DEINITIALIZED, -* ::CUDA_ERROR_NOT_PERMITTED, -* ::CUDA_ERROR_NOT_SUPPORTED -* -* \sa ::cuMemCreate, ::cuMemImportFromShareableHandle -*/ -CUresult CUDAAPI cuMemGetAllocationPropertiesFromHandle(CUmemAllocationProp *prop, CUmemGenericAllocationHandle handle); - -/** -* \brief Given an address \p addr, returns the allocation handle of the backing memory allocation. -* -* The handle is guaranteed to be the same handle value used to map the memory. If the address -* requested is not mapped, the function will fail. The returned handle must be released with -* corresponding number of calls to ::cuMemRelease. -* -* \note The address \p addr, can be any address in a range previously mapped -* by ::cuMemMap, and not necessarily the start address. -* -* \param[out] handle CUDA Memory handle for the backing memory allocation. -* \param[in] addr Memory address to query, that has been mapped previously. -* \returns -* ::CUDA_SUCCESS, -* ::CUDA_ERROR_INVALID_VALUE, -* ::CUDA_ERROR_NOT_INITIALIZED, -* ::CUDA_ERROR_DEINITIALIZED, -* ::CUDA_ERROR_NOT_PERMITTED, -* ::CUDA_ERROR_NOT_SUPPORTED -* -* \sa ::cuMemCreate, ::cuMemRelease, ::cuMemMap -*/ -CUresult CUDAAPI cuMemRetainAllocationHandle(CUmemGenericAllocationHandle *handle, void *addr); - -/** @} */ /* END CUDA_VA */ - -/** - * \defgroup CUDA_MALLOC_ASYNC Stream Ordered Memory Allocator - * - * ___MANBRIEF___ Functions for performing allocation and free operations in stream order. - * Functions for controlling the behavior of the underlying allocator. - * (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the stream ordered memory allocator exposed by the - * low-level CUDA driver application programming interface. - * - * @{ - * - * \section CUDA_MALLOC_ASYNC_overview overview - * - * The asynchronous allocator allows the user to allocate and free in stream order. - * All asynchronous accesses of the allocation must happen between - * the stream executions of the allocation and the free. If the memory is accessed - * outside of the promised stream order, a use before allocation / use after free error - * will cause undefined behavior. - * - * The allocator is free to reallocate the memory as long as it can guarantee - * that compliant memory accesses will not overlap temporally. - * The allocator may refer to internal stream ordering as well as inter-stream dependencies - * (such as CUDA events and null stream dependencies) when establishing the temporal guarantee. - * The allocator may also insert inter-stream dependencies to establish the temporal guarantee. 
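Before moving on, a short illustrative sketch of how ::cuMemRetainAllocationHandle and ::cuMemGetAllocationPropertiesFromHandle (documented above) can be combined to inspect the allocation backing an arbitrary mapped address; the helper name and the uintptr_t cast are assumptions, and the extra reference is balanced with ::cuMemRelease.

\code
#include <cuda.h>
#include <stdint.h>

/* Return the device ordinal backing the allocation mapped at `addr`, or -1 if unmapped. */
static int allocation_device_of(CUdeviceptr addr)
{
    CUmemGenericAllocationHandle handle;
    if (cuMemRetainAllocationHandle(&handle, (void *)(uintptr_t)addr) != CUDA_SUCCESS)
        return -1;                                   /* address is not mapped */

    CUmemAllocationProp prop;
    cuMemGetAllocationPropertiesFromHandle(&prop, handle);

    cuMemRelease(handle);                            /* balance the retain */
    return prop.location.id;
}
\endcode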
- * - * \section CUDA_MALLOC_ASYNC_support Supported Platforms - * - * Whether or not a device supports the integrated stream ordered memory allocator - * may be queried by calling ::cuDeviceGetAttribute() with the device attribute - * ::CU_DEVICE_ATTRIBUTE_MEMORY_POOLS_SUPPORTED - */ - -/** - * \brief Frees memory with stream ordered semantics - * - * Inserts a free operation into \p hStream. - * The allocation must not be accessed after stream execution reaches the free. - * After this API returns, accessing the memory from any subsequent work launched on the GPU - * or querying its pointer attributes results in undefined behavior. - * - * \note During stream capture, this function results in the creation of a free node and - * must therefore be passed the address of a graph allocation. - * - * \param dptr - memory to free - * \param hStream - The stream establishing the stream ordering contract. - * \returns - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT (default stream specified with no current context), - * ::CUDA_ERROR_NOT_SUPPORTED - */ -CUresult CUDAAPI cuMemFreeAsync(CUdeviceptr dptr, CUstream hStream); - -/** - * \brief Allocates memory with stream ordered semantics - * - * Inserts an allocation operation into \p hStream. - * A pointer to the allocated memory is returned immediately in *dptr. - * The allocation must not be accessed until the the allocation operation completes. - * The allocation comes from the memory pool current to the stream's device. - * - * \note The default memory pool of a device contains device memory from that device. - * \note Basic stream ordering allows future work submitted into the same stream to use the allocation. - * Stream query, stream synchronize, and CUDA events can be used to guarantee that the allocation - * operation completes before work submitted in a separate stream runs. - * \note During stream capture, this function results in the creation of an allocation node. In this case, - * the allocation is owned by the graph instead of the memory pool. The memory pool's properties - * are used to set the node's creation parameters. - * - * \param[out] dptr - Returned device pointer - * \param[in] bytesize - Number of bytes to allocate - * \param[in] hStream - The stream establishing the stream ordering contract and the memory pool to allocate from - * \returns - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT (default stream specified with no current context), - * ::CUDA_ERROR_NOT_SUPPORTED, - * ::CUDA_ERROR_OUT_OF_MEMORY - * - * \sa ::cuMemAllocFromPoolAsync, ::cuMemFreeAsync, ::cuDeviceSetMemPool, - * ::cuDeviceGetDefaultMemPool, ::cuDeviceGetMemPool, ::cuMemPoolCreate, - * ::cuMemPoolSetAccess, ::cuMemPoolSetAttribute - */ -CUresult CUDAAPI cuMemAllocAsync(CUdeviceptr *dptr, size_t bytesize, CUstream hStream); - -/** - * \brief Tries to release memory back to the OS - * - * Releases memory back to the OS until the pool contains fewer than minBytesToKeep - * reserved bytes, or there is no more memory that the allocator can safely release. - * The allocator cannot release OS allocations that back outstanding asynchronous allocations. - * The OS allocations may happen at different granularity from the user allocations. - * - * \note: Allocations that have not been freed count as outstanding. 
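A minimal sketch of the basic ::cuMemAllocAsync / ::cuMemFreeAsync round trip described above; cuMemsetD8Async and cuStreamSynchronize come from elsewhere in the driver API, and error handling is omitted.

\code
#include <cuda.h>

/* Allocate, use and free a buffer entirely in stream order. */
static void stream_ordered_roundtrip(CUstream stream, size_t bytes)
{
    CUdeviceptr buf;
    cuMemAllocAsync(&buf, bytes, stream);

    /* Work launched into the same stream after the allocation may use `buf`. */
    cuMemsetD8Async(buf, 0xff, bytes, stream);

    /* The free is also stream ordered; `buf` must not be used afterwards. */
    cuMemFreeAsync(buf, stream);

    /* Only needed if the host must observe completion. */
    cuStreamSynchronize(stream);
}
\endcode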
- * \note: Allocations that have been asynchronously freed but whose completion has - * not been observed on the host (eg. by a synchronize) can count as outstanding. - * - * \param[in] pool - The memory pool to trim - * \param[in] minBytesToKeep - If the pool has less than minBytesToKeep reserved, - * the TrimTo operation is a no-op. Otherwise the pool will be guaranteed to have - * at least minBytesToKeep bytes reserved after the operation. - * \returns - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool, - * ::cuDeviceGetMemPool, ::cuMemPoolCreate - */ -CUresult CUDAAPI cuMemPoolTrimTo(CUmemoryPool pool, size_t minBytesToKeep); - -/** - * \brief Sets attributes of a memory pool - * - * Supported attributes are: - * - ::CU_MEMPOOL_ATTR_RELEASE_THRESHOLD: (value type = cuuint64_t) - * Amount of reserved memory in bytes to hold onto before trying - * to release memory back to the OS. When more than the release - * threshold bytes of memory are held by the memory pool, the - * allocator will try to release memory back to the OS on the - * next call to stream, event or context synchronize. (default 0) - * - ::CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES: (value type = int) - * Allow ::cuMemAllocAsync to use memory asynchronously freed - * in another stream as long as a stream ordering dependency - * of the allocating stream on the free action exists. - * Cuda events and null stream interactions can create the required - * stream ordered dependencies. (default enabled) - * - ::CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC: (value type = int) - * Allow reuse of already completed frees when there is no dependency - * between the free and allocation. (default enabled) - * - ::CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES: (value type = int) - * Allow ::cuMemAllocAsync to insert new stream dependencies - * in order to establish the stream ordering required to reuse - * a piece of memory released by ::cuMemFreeAsync (default enabled). - * - ::CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH: (value type = cuuint64_t) - * Reset the high watermark that tracks the amount of backing memory that was - * allocated for the memory pool. It is illegal to set this attribute to a non-zero value. - * - ::CU_MEMPOOL_ATTR_USED_MEM_HIGH: (value type = cuuint64_t) - * Reset the high watermark that tracks the amount of used memory that was - * allocated for the memory pool. - * - * \param[in] pool - The memory pool to modify - * \param[in] attr - The attribute to modify - * \param[in] value - Pointer to the value to assign - * - * \returns - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool, - * ::cuDeviceGetMemPool, ::cuMemPoolCreate - */ -CUresult CUDAAPI cuMemPoolSetAttribute(CUmemoryPool pool, CUmemPool_attribute attr, void *value); - -/** - * \brief Gets attributes of a memory pool - * - * Supported attributes are: - * - ::CU_MEMPOOL_ATTR_RELEASE_THRESHOLD: (value type = cuuint64_t) - * Amount of reserved memory in bytes to hold onto before trying - * to release memory back to the OS. When more than the release - * threshold bytes of memory are held by the memory pool, the - * allocator will try to release memory back to the OS on the - * next call to stream, event or context synchronize. 
(default 0) - * - ::CU_MEMPOOL_ATTR_REUSE_FOLLOW_EVENT_DEPENDENCIES: (value type = int) - * Allow ::cuMemAllocAsync to use memory asynchronously freed - * in another stream as long as a stream ordering dependency - * of the allocating stream on the free action exists. - * Cuda events and null stream interactions can create the required - * stream ordered dependencies. (default enabled) - * - ::CU_MEMPOOL_ATTR_REUSE_ALLOW_OPPORTUNISTIC: (value type = int) - * Allow reuse of already completed frees when there is no dependency - * between the free and allocation. (default enabled) - * - ::CU_MEMPOOL_ATTR_REUSE_ALLOW_INTERNAL_DEPENDENCIES: (value type = int) - * Allow ::cuMemAllocAsync to insert new stream dependencies - * in order to establish the stream ordering required to reuse - * a piece of memory released by ::cuMemFreeAsync (default enabled). - * - ::CU_MEMPOOL_ATTR_RESERVED_MEM_CURRENT: (value type = cuuint64_t) - * Amount of backing memory currently allocated for the mempool - * - ::CU_MEMPOOL_ATTR_RESERVED_MEM_HIGH: (value type = cuuint64_t) - * High watermark of backing memory allocated for the mempool since the - * last time it was reset. - * - ::CU_MEMPOOL_ATTR_USED_MEM_CURRENT: (value type = cuuint64_t) - * Amount of memory from the pool that is currently in use by the application. - * - ::CU_MEMPOOL_ATTR_USED_MEM_HIGH: (value type = cuuint64_t) - * High watermark of the amount of memory from the pool that was in use by the application. - * - * \param[in] pool - The memory pool to get attributes of - * \param[in] attr - The attribute to get - * \param[out] value - Retrieved value - * - * \returns - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool, - * ::cuDeviceGetMemPool, ::cuMemPoolCreate - */ -CUresult CUDAAPI cuMemPoolGetAttribute(CUmemoryPool pool, CUmemPool_attribute attr, void *value); - -/** - * \brief Controls visibility of pools between devices - * - * \param[in] pool - The pool being modified - * \param[in] map - Array of access descriptors. Each descriptor instructs the access to enable for a single gpu. - * \param[in] count - Number of descriptors in the map array. - * - * \returns - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool, - * ::cuDeviceGetMemPool, ::cuMemPoolCreate - */ -CUresult CUDAAPI cuMemPoolSetAccess(CUmemoryPool pool, const CUmemAccessDesc *map, size_t count); - -/** - * \brief Returns the accessibility of a pool from a device - * - * Returns the accessibility of the pool's memory from the specified location. - * - * \param[out] flags - the accessibility of the pool from the specified location - * \param[in] memPool - the pool being queried - * \param[in] location - the location accessing the pool - * - * \sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool, - * ::cuDeviceGetMemPool, ::cuMemPoolCreate - */ -CUresult CUDAAPI cuMemPoolGetAccess(CUmemAccess_flags *flags, CUmemoryPool memPool, CUmemLocation *location); - -/** - * \brief Creates a memory pool - * - * Creates a CUDA memory pool and returns the handle in \p pool. The \p poolProps determines - * the properties of the pool such as the backing device and IPC capabilities. - * - * By default, the pool's memory will be accessible from the device it is allocated on. 
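An illustrative sketch combining ::cuMemPoolSetAttribute, ::cuMemPoolSetAccess and ::cuMemPoolTrimTo on an existing pool; the threshold value and peer-device ordinal are arbitrary, and error handling is omitted.

\code
#include <cuda.h>

/* Keep up to 64 MiB cached in the pool and make it accessible from a peer device. */
static void tune_pool(CUmemoryPool pool, int peerDevice)
{
    cuuint64_t threshold = 64ull << 20;              /* bytes held before releasing to the OS */
    cuMemPoolSetAttribute(pool, CU_MEMPOOL_ATTR_RELEASE_THRESHOLD, &threshold);

    CUmemAccessDesc access = {0};
    access.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
    access.location.id   = peerDevice;
    access.flags         = CU_MEM_ACCESS_FLAGS_PROT_READWRITE;
    cuMemPoolSetAccess(pool, &access, 1);

    /* Trim everything the allocator can safely give back right now. */
    cuMemPoolTrimTo(pool, 0);
}
\endcode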
- * - * \note Specifying CU_MEM_HANDLE_TYPE_NONE creates a memory pool that will not support IPC. - * - * \returns - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_OUT_OF_MEMORY, - * ::CUDA_ERROR_NOT_SUPPORTED - * - * \sa ::cuDeviceSetMemPool, ::cuDeviceGetMemPool, ::cuDeviceGetDefaultMemPool, - * ::cuMemAllocFromPoolAsync, ::cuMemPoolExportToShareableHandle - */ -CUresult CUDAAPI cuMemPoolCreate(CUmemoryPool *pool, const CUmemPoolProps *poolProps); - -/** - * \brief Destroys the specified memory pool - * - * If any pointers obtained from this pool haven't been freed or - * the pool has free operations that haven't completed - * when ::cuMemPoolDestroy is invoked, the function will return immediately and the - * resources associated with the pool will be released automatically - * once there are no more outstanding allocations. - * - * Destroying the current mempool of a device sets the default mempool of - * that device as the current mempool for that device. - * - * \note A device's default memory pool cannot be destroyed. - * - * \returns - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuMemFreeAsync, ::cuDeviceSetMemPool, ::cuDeviceGetMemPool, - * ::cuDeviceGetDefaultMemPool, ::cuMemPoolCreate - */ -CUresult CUDAAPI cuMemPoolDestroy(CUmemoryPool pool); - -/** - * \brief Allocates memory from a specified pool with stream ordered semantics. - * - * Inserts an allocation operation into \p hStream. - * A pointer to the allocated memory is returned immediately in *dptr. - * The allocation must not be accessed until the the allocation operation completes. - * The allocation comes from the specified memory pool. - * - * \note - * - The specified memory pool may be from a device different than that of the specified \p hStream. - * - * - Basic stream ordering allows future work submitted into the same stream to use the allocation. - * Stream query, stream synchronize, and CUDA events can be used to guarantee that the allocation - * operation completes before work submitted in a separate stream runs. - * - * \note During stream capture, this function results in the creation of an allocation node. In this case, - * the allocation is owned by the graph instead of the memory pool. The memory pool's properties - * are used to set the node's creation parameters. - * - * \param[out] dptr - Returned device pointer - * \param[in] bytesize - Number of bytes to allocate - * \param[in] pool - The pool to allocate from - * \param[in] hStream - The stream establishing the stream ordering semantic - * - * \returns - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT (default stream specified with no current context), - * ::CUDA_ERROR_NOT_SUPPORTED, - * ::CUDA_ERROR_OUT_OF_MEMORY - * - * \sa ::cuMemAllocAsync, ::cuMemFreeAsync, ::cuDeviceGetDefaultMemPool, - * ::cuDeviceGetMemPool, ::cuMemPoolCreate, ::cuMemPoolSetAccess, - * ::cuMemPoolSetAttribute - */ -CUresult CUDAAPI cuMemAllocFromPoolAsync(CUdeviceptr *dptr, size_t bytesize, CUmemoryPool pool, CUstream hStream); - -/** - * \brief Exports a memory pool to the requested handle type. - * - * Given an IPC capable mempool, create an OS handle to share the pool with another process. - * A recipient process can convert the shareable handle into a mempool with ::cuMemPoolImportFromShareableHandle. - * Individual pointers can then be shared with the ::cuMemPoolExportPointer and ::cuMemPoolImportPointer APIs. 
- * The implementation of what the shareable handle is and how it can be transferred is defined by the requested - * handle type. - * - * \note: To create an IPC capable mempool, create a mempool with a CUmemAllocationHandleType other than CU_MEM_HANDLE_TYPE_NONE. - * - * \param[out] handle_out - Returned OS handle - * \param[in] pool - pool to export - * \param[in] handleType - the type of handle to create - * \param[in] flags - must be 0 - * - * \returns - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_OUT_OF_MEMORY - * - * \sa ::cuMemPoolImportFromShareableHandle, ::cuMemPoolExportPointer, - * ::cuMemPoolImportPointer, ::cuMemAllocAsync, ::cuMemFreeAsync, - * ::cuDeviceGetDefaultMemPool, ::cuDeviceGetMemPool, ::cuMemPoolCreate, - * ::cuMemPoolSetAccess, ::cuMemPoolSetAttribute - */ -CUresult CUDAAPI cuMemPoolExportToShareableHandle(void *handle_out, CUmemoryPool pool, CUmemAllocationHandleType handleType, unsigned long long flags); - -/** - * \brief imports a memory pool from a shared handle. - * - * Specific allocations can be imported from the imported pool with cuMemPoolImportPointer. - * - * \note Imported memory pools do not support creating new allocations. - * As such imported memory pools may not be used in cuDeviceSetMemPool - * or ::cuMemAllocFromPoolAsync calls. - * - * \param[out] pool_out - Returned memory pool - * \param[in] handle - OS handle of the pool to open - * \param[in] handleType - The type of handle being imported - * \param[in] flags - must be 0 - * - * \returns - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_OUT_OF_MEMORY - * - * \sa ::cuMemPoolExportToShareableHandle, ::cuMemPoolExportPointer, ::cuMemPoolImportPointer - */ -CUresult CUDAAPI cuMemPoolImportFromShareableHandle( - CUmemoryPool *pool_out, - void *handle, - CUmemAllocationHandleType handleType, - unsigned long long flags); - -/** - * \brief Export data to share a memory pool allocation between processes. - * - * Constructs \p shareData_out for sharing a specific allocation from an already shared memory pool. - * The recipient process can import the allocation with the ::cuMemPoolImportPointer api. - * The data is not a handle and may be shared through any IPC mechanism. - * - * \param[out] shareData_out - Returned export data - * \param[in] ptr - pointer to memory being exported - * - * \returns - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_OUT_OF_MEMORY - * - * \sa ::cuMemPoolExportToShareableHandle, ::cuMemPoolImportFromShareableHandle, ::cuMemPoolImportPointer - */ -CUresult CUDAAPI cuMemPoolExportPointer(CUmemPoolPtrExportData *shareData_out, CUdeviceptr ptr); - -/** - * \brief Import a memory pool allocation from another process. - * - * Returns in \p ptr_out a pointer to the imported memory. - * The imported memory must not be accessed before the allocation operation completes - * in the exporting process. The imported memory must be freed from all importing processes before - * being freed in the exporting process. The pointer may be freed with cuMemFree - * or cuMemFreeAsync. If cuMemFreeAsync is used, the free must be completed - * on the importing process before the free operation on the exporting process. 
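A sketch of the exporting side of pool-based IPC on Linux, assuming the pool was created with a POSIX file-descriptor handle type; sending `fd` and `shareData` to the peer process (which would call ::cuMemPoolImportFromShareableHandle and ::cuMemPoolImportPointer) is out of scope here, and error handling is omitted.

\code
#include <cuda.h>

/* Exporter side: share the pool, allocate from it, and export the pointer. */
static void export_allocation(CUmemoryPool ipcPool, CUstream stream, size_t bytes,
                              int *fd_out, CUmemPoolPtrExportData *shareData_out,
                              CUdeviceptr *ptr_out)
{
    /* The pool must have been created with a handle type other than CU_MEM_HANDLE_TYPE_NONE. */
    cuMemPoolExportToShareableHandle(fd_out, ipcPool,
                                     CU_MEM_HANDLE_TYPE_POSIX_FILE_DESCRIPTOR, 0);

    cuMemAllocFromPoolAsync(ptr_out, bytes, ipcPool, stream);
    cuStreamSynchronize(stream);       /* conservative: let the allocation op complete before sharing */

    cuMemPoolExportPointer(shareData_out, *ptr_out);
}
\endcode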
- * - * \note The cuMemFreeAsync api may be used in the exporting process before - * the cuMemFreeAsync operation completes in its stream as long as the - * cuMemFreeAsync in the exporting process specifies a stream with - * a stream dependency on the importing process's cuMemFreeAsync. - * - * \param[out] ptr_out - pointer to imported memory - * \param[in] pool - pool from which to import - * \param[in] shareData - data specifying the memory to import - * - * \returns - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_OUT_OF_MEMORY - * - * \sa ::cuMemPoolExportToShareableHandle, ::cuMemPoolImportFromShareableHandle, ::cuMemPoolExportPointer - */ -CUresult CUDAAPI cuMemPoolImportPointer(CUdeviceptr *ptr_out, CUmemoryPool pool, CUmemPoolPtrExportData *shareData); - -/** @} */ /* END CUDA_MALLOC_ASYNC */ - -/** - * \defgroup CUDA_UNIFIED Unified Addressing - * - * ___MANBRIEF___ unified addressing functions of the low-level CUDA driver - * API (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the unified addressing functions of the - * low-level CUDA driver application programming interface. - * - * @{ - * - * \section CUDA_UNIFIED_overview Overview - * - * CUDA devices can share a unified address space with the host. - * For these devices there is no distinction between a device - * pointer and a host pointer -- the same pointer value may be - * used to access memory from the host program and from a kernel - * running on the device (with exceptions enumerated below). - * - * \section CUDA_UNIFIED_support Supported Platforms - * - * Whether or not a device supports unified addressing may be - * queried by calling ::cuDeviceGetAttribute() with the device - * attribute ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING. - * - * Unified addressing is automatically enabled in 64-bit processes - * - * \section CUDA_UNIFIED_lookup Looking Up Information from Pointer Values - * - * It is possible to look up information about the memory which backs a - * pointer value. For instance, one may want to know if a pointer points - * to host or device memory. As another example, in the case of device - * memory, one may want to know on which CUDA device the memory - * resides. These properties may be queried using the function - * ::cuPointerGetAttribute() - * - * Since pointers are unique, it is not necessary to specify information - * about the pointers specified to the various copy functions in the - * CUDA API. The function ::cuMemcpy() may be used to perform a copy - * between two pointers, ignoring whether they point to host or device - * memory (making ::cuMemcpyHtoD(), ::cuMemcpyDtoD(), and ::cuMemcpyDtoH() - * unnecessary for devices supporting unified addressing). For - * multidimensional copies, the memory type ::CU_MEMORYTYPE_UNIFIED may be - * used to specify that the CUDA driver should infer the location of the - * pointer from its value. - * - * \section CUDA_UNIFIED_automaphost Automatic Mapping of Host Allocated Host Memory - * - * All host memory allocated in all contexts using ::cuMemAllocHost() and - * ::cuMemHostAlloc() is always directly accessible from all contexts on - * all devices that support unified addressing. This is the case regardless - * of whether or not the flags ::CU_MEMHOSTALLOC_PORTABLE and - * ::CU_MEMHOSTALLOC_DEVICEMAP are specified. 
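An illustrative sketch of the unified-addressing lookup described in this overview, using ::cuPointerGetAttribute (documented further below) and ::cuMemcpy; error handling is omitted.

\code
#include <cuda.h>

/* With unified addressing, one pointer value can be classified and copied directly. */
static void classify_and_copy(CUdeviceptr dst, CUdeviceptr src, size_t bytes)
{
    unsigned int memType = 0;
    cuPointerGetAttribute(&memType, CU_POINTER_ATTRIBUTE_MEMORY_TYPE, src);

    if (memType == CU_MEMORYTYPE_DEVICE) {
        /* src resides in device memory. */
    } else if (memType == CU_MEMORYTYPE_HOST) {
        /* src is host memory allocated or registered through CUDA. */
    }

    /* No cuMemcpyHtoD/DtoH variant needed: the driver infers both locations. */
    cuMemcpy(dst, src, bytes);
}
\endcode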
- * - * The pointer value through which allocated host memory may be accessed - * in kernels on all devices that support unified addressing is the same - * as the pointer value through which that memory is accessed on the host, - * so it is not necessary to call ::cuMemHostGetDevicePointer() to get the device - * pointer for these allocations. - * - * Note that this is not the case for memory allocated using the flag - * ::CU_MEMHOSTALLOC_WRITECOMBINED, as discussed below. - * - * \section CUDA_UNIFIED_autopeerregister Automatic Registration of Peer Memory - * - * Upon enabling direct access from a context that supports unified addressing - * to another peer context that supports unified addressing using - * ::cuCtxEnablePeerAccess() all memory allocated in the peer context using - * ::cuMemAlloc() and ::cuMemAllocPitch() will immediately be accessible - * by the current context. The device pointer value through - * which any peer memory may be accessed in the current context - * is the same pointer value through which that memory may be - * accessed in the peer context. - * - * \section CUDA_UNIFIED_exceptions Exceptions, Disjoint Addressing - * - * Not all memory may be accessed on devices through the same pointer - * value through which they are accessed on the host. These exceptions - * are host memory registered using ::cuMemHostRegister() and host memory - * allocated using the flag ::CU_MEMHOSTALLOC_WRITECOMBINED. For these - * exceptions, there exists a distinct host and device address for the - * memory. The device address is guaranteed to not overlap any valid host - * pointer range and is guaranteed to have the same value across all - * contexts that support unified addressing. - * - * This device address may be queried using ::cuMemHostGetDevicePointer() - * when a context using unified addressing is current. Either the host - * or the unified device pointer value may be used to refer to this memory - * through ::cuMemcpy() and similar functions using the - * ::CU_MEMORYTYPE_UNIFIED memory type. - * - */ - -/** - * \brief Returns information about a pointer - * - * The supported attributes are: - * - * - ::CU_POINTER_ATTRIBUTE_CONTEXT: - * - * Returns in \p *data the ::CUcontext in which \p ptr was allocated or - * registered. - * The type of \p data must be ::CUcontext *. - * - * If \p ptr was not allocated by, mapped by, or registered with - * a ::CUcontext which uses unified virtual addressing then - * ::CUDA_ERROR_INVALID_VALUE is returned. - * - * - ::CU_POINTER_ATTRIBUTE_MEMORY_TYPE: - * - * Returns in \p *data the physical memory type of the memory that - * \p ptr addresses as a ::CUmemorytype enumerated value. - * The type of \p data must be unsigned int. - * - * If \p ptr addresses device memory then \p *data is set to - * ::CU_MEMORYTYPE_DEVICE. The particular ::CUdevice on which the - * memory resides is the ::CUdevice of the ::CUcontext returned by the - * ::CU_POINTER_ATTRIBUTE_CONTEXT attribute of \p ptr. - * - * If \p ptr addresses host memory then \p *data is set to - * ::CU_MEMORYTYPE_HOST. - * - * If \p ptr was not allocated by, mapped by, or registered with - * a ::CUcontext which uses unified virtual addressing then - * ::CUDA_ERROR_INVALID_VALUE is returned. - * - * If the current ::CUcontext does not support unified virtual - * addressing then ::CUDA_ERROR_INVALID_CONTEXT is returned. 
- * - * - ::CU_POINTER_ATTRIBUTE_DEVICE_POINTER: - * - * Returns in \p *data the device pointer value through which - * \p ptr may be accessed by kernels running in the current - * ::CUcontext. - * The type of \p data must be CUdeviceptr *. - * - * If there exists no device pointer value through which - * kernels running in the current ::CUcontext may access - * \p ptr then ::CUDA_ERROR_INVALID_VALUE is returned. - * - * If there is no current ::CUcontext then - * ::CUDA_ERROR_INVALID_CONTEXT is returned. - * - * Except in the exceptional disjoint addressing cases discussed - * below, the value returned in \p *data will equal the input - * value \p ptr. - * - * - ::CU_POINTER_ATTRIBUTE_HOST_POINTER: - * - * Returns in \p *data the host pointer value through which - * \p ptr may be accessed by by the host program. - * The type of \p data must be void **. - * If there exists no host pointer value through which - * the host program may directly access \p ptr then - * ::CUDA_ERROR_INVALID_VALUE is returned. - * - * Except in the exceptional disjoint addressing cases discussed - * below, the value returned in \p *data will equal the input - * value \p ptr. - * - * - ::CU_POINTER_ATTRIBUTE_P2P_TOKENS: - * - * Returns in \p *data two tokens for use with the nv-p2p.h Linux - * kernel interface. \p data must be a struct of type - * CUDA_POINTER_ATTRIBUTE_P2P_TOKENS. - * - * \p ptr must be a pointer to memory obtained from :cuMemAlloc(). - * Note that p2pToken and vaSpaceToken are only valid for the - * lifetime of the source allocation. A subsequent allocation at - * the same address may return completely different tokens. - * Querying this attribute has a side effect of setting the attribute - * ::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS for the region of memory that - * \p ptr points to. - * - * - ::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS: - * - * A boolean attribute which when set, ensures that synchronous memory operations - * initiated on the region of memory that \p ptr points to will always synchronize. - * See further documentation in the section titled "API synchronization behavior" - * to learn more about cases when synchronous memory operations can - * exhibit asynchronous behavior. - * - * - ::CU_POINTER_ATTRIBUTE_BUFFER_ID: - * - * Returns in \p *data a buffer ID which is guaranteed to be unique within the process. - * \p data must point to an unsigned long long. - * - * \p ptr must be a pointer to memory obtained from a CUDA memory allocation API. - * Every memory allocation from any of the CUDA memory allocation APIs will - * have a unique ID over a process lifetime. Subsequent allocations do not reuse IDs - * from previous freed allocations. IDs are only unique within a single process. - * - * - * - ::CU_POINTER_ATTRIBUTE_IS_MANAGED: - * - * Returns in \p *data a boolean that indicates whether the pointer points to - * managed memory or not. - * - * If \p ptr is not a valid CUDA pointer then ::CUDA_ERROR_INVALID_VALUE is returned. - * - * - ::CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL: - * - * Returns in \p *data an integer representing a device ordinal of a device against - * which the memory was allocated or registered. - * - * - ::CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE: - * - * Returns in \p *data a boolean that indicates if this pointer maps to - * an allocation that is suitable for ::cudaIpcGetMemHandle. - * - * - ::CU_POINTER_ATTRIBUTE_RANGE_START_ADDR: - * - * Returns in \p *data the starting address for the allocation referenced - * by the device pointer \p ptr. 
Note that this is not necessarily the - * address of the mapped region, but the address of the mappable address - * range \p ptr references (e.g. from ::cuMemAddressReserve). - * - * - ::CU_POINTER_ATTRIBUTE_RANGE_SIZE: - * - * Returns in \p *data the size for the allocation referenced by the device - * pointer \p ptr. Note that this is not necessarily the size of the mapped - * region, but the size of the mappable address range \p ptr references - * (e.g. from ::cuMemAddressReserve). To retrieve the size of the mapped - * region, see ::cuMemGetAddressRange - * - * - ::CU_POINTER_ATTRIBUTE_MAPPED: - * - * Returns in \p *data a boolean that indicates if this pointer is in a - * valid address range that is mapped to a backing allocation. - * - * - ::CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES: - * - * Returns a bitmask of the allowed handle types for an allocation that may - * be passed to ::cuMemExportToShareableHandle. - * - * - ::CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE: - * - * Returns in \p *data the handle to the mempool that the allocation was obtained from. - * - * \par - * - * Note that for most allocations in the unified virtual address space - * the host and device pointer for accessing the allocation will be the - * same. The exceptions to this are - * - user memory registered using ::cuMemHostRegister - * - host memory allocated using ::cuMemHostAlloc with the - * ::CU_MEMHOSTALLOC_WRITECOMBINED flag - * For these types of allocation there will exist separate, disjoint host - * and device addresses for accessing the allocation. In particular - * - The host address will correspond to an invalid unmapped device address - * (which will result in an exception if accessed from the device) - * - The device address will correspond to an invalid unmapped host address - * (which will result in an exception if accessed from the host). - * For these types of allocations, querying ::CU_POINTER_ATTRIBUTE_HOST_POINTER - * and ::CU_POINTER_ATTRIBUTE_DEVICE_POINTER may be used to retrieve the host - * and device addresses from either address. - * - * \param data - Returned pointer attribute value - * \param attribute - Pointer attribute to query - * \param ptr - Pointer - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_DEVICE - * \notefnerr - * - * \sa - * ::cuPointerSetAttribute, - * ::cuMemAlloc, - * ::cuMemFree, - * ::cuMemAllocHost, - * ::cuMemFreeHost, - * ::cuMemHostAlloc, - * ::cuMemHostRegister, - * ::cuMemHostUnregister, - * ::cudaPointerGetAttributes - */ -CUresult CUDAAPI cuPointerGetAttribute(void *data, CUpointer_attribute attribute, CUdeviceptr ptr); - -/** - * \brief Prefetches memory to the specified destination device - * - * Prefetches memory to the specified destination device. \p devPtr is the - * base device pointer of the memory to be prefetched and \p dstDevice is the - * destination device. \p count specifies the number of bytes to copy. \p hStream - * is the stream in which the operation is enqueued. The memory range must refer - * to managed memory allocated via ::cuMemAllocManaged or declared via __managed__ variables. - * - * Passing in CU_DEVICE_CPU for \p dstDevice will prefetch the data to host memory. If - * \p dstDevice is a GPU, then the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS - * must be non-zero. 
Additionally, \p hStream must be associated with a device that has a - * non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. - * - * The start address and end address of the memory range will be rounded down and rounded up - * respectively to be aligned to CPU page size before the prefetch operation is enqueued - * in the stream. - * - * If no physical memory has been allocated for this region, then this memory region - * will be populated and mapped on the destination device. If there's insufficient - * memory to prefetch the desired region, the Unified Memory driver may evict pages from other - * ::cuMemAllocManaged allocations to host memory in order to make room. Device memory - * allocated using ::cuMemAlloc or ::cuArrayCreate will not be evicted. - * - * By default, any mappings to the previous location of the migrated pages are removed and - * mappings for the new location are only setup on \p dstDevice. The exact behavior however - * also depends on the settings applied to this memory range via ::cuMemAdvise as described - * below: - * - * If ::CU_MEM_ADVISE_SET_READ_MOSTLY was set on any subset of this memory range, - * then that subset will create a read-only copy of the pages on \p dstDevice. - * - * If ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION was called on any subset of this memory - * range, then the pages will be migrated to \p dstDevice even if \p dstDevice is not the - * preferred location of any pages in the memory range. - * - * If ::CU_MEM_ADVISE_SET_ACCESSED_BY was called on any subset of this memory range, - * then mappings to those pages from all the appropriate processors are updated to - * refer to the new location if establishing such a mapping is possible. Otherwise, - * those mappings are cleared. - * - * Note that this API is not required for functionality and only serves to improve performance - * by allowing the application to migrate data to a suitable location before it is accessed. - * Memory accesses to this range are always coherent and are allowed even when the data is - * actively being migrated. - * - * Note that this function is asynchronous with respect to the host and all work - * on other devices. - * - * \param devPtr - Pointer to be prefetched - * \param count - Size in bytes - * \param dstDevice - Destination device to prefetch to - * \param hStream - Stream to enqueue prefetch operation - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_DEVICE - * \notefnerr - * \note_async - * \note_null_stream - * - * \sa ::cuMemcpy, ::cuMemcpyPeer, ::cuMemcpyAsync, - * ::cuMemcpy3DPeerAsync, ::cuMemAdvise, - * ::cudaMemPrefetchAsync - */ -CUresult CUDAAPI cuMemPrefetchAsync(CUdeviceptr devPtr, size_t count, CUdevice dstDevice, CUstream hStream); - -/** - * \brief Advise about the usage of a given memory range - * - * Advise the Unified Memory subsystem about the usage pattern for the memory range - * starting at \p devPtr with a size of \p count bytes. The start address and end address of the memory - * range will be rounded down and rounded up respectively to be aligned to CPU page size before the - * advice is applied. The memory range must refer to managed memory allocated via ::cuMemAllocManaged - * or declared via __managed__ variables. The memory range could also refer to system-allocated pageable - * memory provided it represents a valid, host-accessible region of memory and all additional constraints - * imposed by \p advice as outlined below are also satisfied. 
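A hedged sketch combining ::cuMemAdvise and ::cuMemPrefetchAsync on a managed buffer; cuMemAllocManaged, cuDeviceGet, cuStreamSynchronize and cuMemFree come from elsewhere in the driver API, the advice choices are illustrative only, and device 0 is assumed to report ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS.

\code
#include <cuda.h>

/* Hint the driver about a managed buffer's usage and stage it to the GPU and back. */
static void stage_managed_buffer(size_t bytes, CUstream stream)
{
    CUdeviceptr buf;
    cuMemAllocManaged(&buf, bytes, CU_MEM_ATTACH_GLOBAL);

    CUdevice dev;
    cuDeviceGet(&dev, 0);

    cuMemAdvise(buf, bytes, CU_MEM_ADVISE_SET_PREFERRED_LOCATION, dev);
    cuMemAdvise(buf, bytes, CU_MEM_ADVISE_SET_ACCESSED_BY, CU_DEVICE_CPU);

    cuMemPrefetchAsync(buf, bytes, dev, stream);            /* migrate to the GPU */
    /* ... launch work that reads/writes buf in `stream` ... */
    cuMemPrefetchAsync(buf, bytes, CU_DEVICE_CPU, stream);  /* bring results back */

    cuStreamSynchronize(stream);
    cuMemFree(buf);
}
\endcode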
Specifying an invalid system-allocated pageable - * memory range results in an error being returned. - * - * The \p advice parameter can take the following values: - * - ::CU_MEM_ADVISE_SET_READ_MOSTLY: This implies that the data is mostly going to be read - * from and only occasionally written to. Any read accesses from any processor to this region will create a - * read-only copy of at least the accessed pages in that processor's memory. Additionally, if ::cuMemPrefetchAsync - * is called on this region, it will create a read-only copy of the data on the destination processor. - * If any processor writes to this region, all copies of the corresponding page will be invalidated - * except for the one where the write occurred. The \p device argument is ignored for this advice. - * Note that for a page to be read-duplicated, the accessing processor must either be the CPU or a GPU - * that has a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. - * Also, if a context is created on a device that does not have the device attribute - * ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS set, then read-duplication will not occur until - * all such contexts are destroyed. - * If the memory region refers to valid system-allocated pageable memory, then the accessing device must - * have a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS for a read-only - * copy to be created on that device. Note however that if the accessing device also has a non-zero value for the - * device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, then setting this advice - * will not create a read-only copy when that device accesses this memory region. - * - * - ::CU_MEM_ADVISE_UNSET_READ_MOSTLY: Undoes the effect of ::CU_MEM_ADVISE_SET_READ_MOSTLY and also prevents the - * Unified Memory driver from attempting heuristic read-duplication on the memory range. Any read-duplicated - * copies of the data will be collapsed into a single copy. The location for the collapsed - * copy will be the preferred location if the page has a preferred location and one of the read-duplicated - * copies was resident at that location. Otherwise, the location chosen is arbitrary. - * - * - ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION: This advice sets the preferred location for the - * data to be the memory belonging to \p device. Passing in CU_DEVICE_CPU for \p device sets the - * preferred location as host memory. If \p device is a GPU, then it must have a non-zero value for the - * device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. Setting the preferred location - * does not cause data to migrate to that location immediately. Instead, it guides the migration policy - * when a fault occurs on that memory region. If the data is already in its preferred location and the - * faulting processor can establish a mapping without requiring the data to be migrated, then - * data migration will be avoided. On the other hand, if the data is not in its preferred location - * or if a direct mapping cannot be established, then it will be migrated to the processor accessing - * it. It is important to note that setting the preferred location does not prevent data prefetching - * done using ::cuMemPrefetchAsync. - * Having a preferred location can override the page thrash detection and resolution logic in the Unified - * Memory driver. 
Normally, if a page is detected to be constantly thrashing between for example host and device - * memory, the page may eventually be pinned to host memory by the Unified Memory driver. But - * if the preferred location is set as device memory, then the page will continue to thrash indefinitely. - * If ::CU_MEM_ADVISE_SET_READ_MOSTLY is also set on this memory region or any subset of it, then the - * policies associated with that advice will override the policies of this advice, unless read accesses from - * \p device will not result in a read-only copy being created on that device as outlined in description for - * the advice ::CU_MEM_ADVISE_SET_READ_MOSTLY. - * If the memory region refers to valid system-allocated pageable memory, then \p device must have a non-zero - * value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. Additionally, if \p device has - * a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, - * then this call has no effect. Note however that this behavior may change in the future. - * - * - ::CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION: Undoes the effect of ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION - * and changes the preferred location to none. - * - * - ::CU_MEM_ADVISE_SET_ACCESSED_BY: This advice implies that the data will be accessed by \p device. - * Passing in ::CU_DEVICE_CPU for \p device will set the advice for the CPU. If \p device is a GPU, then - * the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS must be non-zero. - * This advice does not cause data migration and has no impact on the location of the data per se. Instead, - * it causes the data to always be mapped in the specified processor's page tables, as long as the - * location of the data permits a mapping to be established. If the data gets migrated for any reason, - * the mappings are updated accordingly. - * This advice is recommended in scenarios where data locality is not important, but avoiding faults is. - * Consider for example a system containing multiple GPUs with peer-to-peer access enabled, where the - * data located on one GPU is occasionally accessed by peer GPUs. In such scenarios, migrating data - * over to the other GPUs is not as important because the accesses are infrequent and the overhead of - * migration may be too high. But preventing faults can still help improve performance, and so having - * a mapping set up in advance is useful. Note that on CPU access of this data, the data may be migrated - * to host memory because the CPU typically cannot access device memory directly. Any GPU that had the - * ::CU_MEM_ADVISE_SET_ACCESSED_BY flag set for this data will now have its mapping updated to point to the - * page in host memory. - * If ::CU_MEM_ADVISE_SET_READ_MOSTLY is also set on this memory region or any subset of it, then the - * policies associated with that advice will override the policies of this advice. Additionally, if the - * preferred location of this memory region or any subset of it is also \p device, then the policies - * associated with ::CU_MEM_ADVISE_SET_PREFERRED_LOCATION will override the policies of this advice. - * If the memory region refers to valid system-allocated pageable memory, then \p device must have a non-zero - * value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. 
Additionally, if \p device has - * a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, - * then this call has no effect. - * - * - ::CU_MEM_ADVISE_UNSET_ACCESSED_BY: Undoes the effect of ::CU_MEM_ADVISE_SET_ACCESSED_BY. Any mappings to - * the data from \p device may be removed at any time causing accesses to result in non-fatal page faults. - * If the memory region refers to valid system-allocated pageable memory, then \p device must have a non-zero - * value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. Additionally, if \p device has - * a non-zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES, - * then this call has no effect. - * - * \param devPtr - Pointer to memory to set the advice for - * \param count - Size in bytes of the memory range - * \param advice - Advice to be applied for the specified memory range - * \param device - Device to apply the advice for - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_DEVICE - * \notefnerr - * \note_async - * \note_null_stream - * - * \sa ::cuMemcpy, ::cuMemcpyPeer, ::cuMemcpyAsync, - * ::cuMemcpy3DPeerAsync, ::cuMemPrefetchAsync, - * ::cudaMemAdvise - */ -CUresult CUDAAPI cuMemAdvise(CUdeviceptr devPtr, size_t count, CUmem_advise advice, CUdevice device); - -/** - * \brief Query an attribute of a given memory range - * - * Query an attribute about the memory range starting at \p devPtr with a size of \p count bytes. The - * memory range must refer to managed memory allocated via ::cuMemAllocManaged or declared via - * __managed__ variables. - * - * The \p attribute parameter can take the following values: - * - ::CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY: If this attribute is specified, \p data will be interpreted - * as a 32-bit integer, and \p dataSize must be 4. The result returned will be 1 if all pages in the given - * memory range have read-duplication enabled, or 0 otherwise. - * - ::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION: If this attribute is specified, \p data will be - * interpreted as a 32-bit integer, and \p dataSize must be 4. The result returned will be a GPU device - * id if all pages in the memory range have that GPU as their preferred location, or it will be CU_DEVICE_CPU - * if all pages in the memory range have the CPU as their preferred location, or it will be CU_DEVICE_INVALID - * if either all the pages don't have the same preferred location or some of the pages don't have a - * preferred location at all. Note that the actual location of the pages in the memory range at the time of - * the query may be different from the preferred location. - * - ::CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY: If this attribute is specified, \p data will be interpreted - * as an array of 32-bit integers, and \p dataSize must be a non-zero multiple of 4. The result returned - * will be a list of device ids that had ::CU_MEM_ADVISE_SET_ACCESSED_BY set for that entire memory range. - * If any device does not have that advice set for the entire memory range, that device will not be included. - * If \p data is larger than the number of devices that have that advice set for that memory range, - * CU_DEVICE_INVALID will be returned in all the extra space provided. For ex., if \p dataSize is 12 - * (i.e. \p data has 3 elements) and only device 0 has the advice set, then the result returned will be - * { 0, CU_DEVICE_INVALID, CU_DEVICE_INVALID }. 
If \p data is smaller than the number of devices that have - * that advice set, then only as many devices will be returned as can fit in the array. There is no - * guarantee on which specific devices will be returned, however. - * - ::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION: If this attribute is specified, \p data will be - * interpreted as a 32-bit integer, and \p dataSize must be 4. The result returned will be the last location - * to which all pages in the memory range were prefetched explicitly via ::cuMemPrefetchAsync. This will either be - * a GPU id or CU_DEVICE_CPU depending on whether the last location for prefetch was a GPU or the CPU - * respectively. If any page in the memory range was never explicitly prefetched or if all pages were not - * prefetched to the same location, CU_DEVICE_INVALID will be returned. Note that this simply returns the - * last location that the application requested to prefetch the memory range to. It gives no indication as to - * whether the prefetch operation to that location has completed or even begun. - * - * \param data - A pointers to a memory location where the result - * of each attribute query will be written to. - * \param dataSize - Array containing the size of data - * \param attribute - The attribute to query - * \param devPtr - Start of the range to query - * \param count - Size of the range to query - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_DEVICE - * \notefnerr - * \note_async - * \note_null_stream - * - * \sa ::cuMemRangeGetAttributes, ::cuMemPrefetchAsync, - * ::cuMemAdvise, - * ::cudaMemRangeGetAttribute - */ -CUresult CUDAAPI cuMemRangeGetAttribute(void *data, size_t dataSize, CUmem_range_attribute attribute, CUdeviceptr devPtr, size_t count); - -/** - * \brief Query attributes of a given memory range. - * - * Query attributes of the memory range starting at \p devPtr with a size of \p count bytes. The - * memory range must refer to managed memory allocated via ::cuMemAllocManaged or declared via - * __managed__ variables. The \p attributes array will be interpreted to have \p numAttributes - * entries. The \p dataSizes array will also be interpreted to have \p numAttributes entries. - * The results of the query will be stored in \p data. - * - * The list of supported attributes are given below. Please refer to ::cuMemRangeGetAttribute for - * attribute descriptions and restrictions. - * - * - ::CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY - * - ::CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION - * - ::CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY - * - ::CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION - * - * \param data - A two-dimensional array containing pointers to memory - * locations where the result of each attribute query will be written to. 
- * \param dataSizes - Array containing the sizes of each result - * \param attributes - An array of attributes to query - * (numAttributes and the number of attributes in this array should match) - * \param numAttributes - Number of attributes to query - * \param devPtr - Start of the range to query - * \param count - Size of the range to query - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_DEVICE - * \notefnerr - * - * \sa ::cuMemRangeGetAttribute, ::cuMemAdvise, - * ::cuMemPrefetchAsync, - * ::cudaMemRangeGetAttributes - */ -CUresult CUDAAPI cuMemRangeGetAttributes(void **data, size_t *dataSizes, CUmem_range_attribute *attributes, size_t numAttributes, CUdeviceptr devPtr, size_t count); - -/** - * \brief Set attributes on a previously allocated memory region - * - * The supported attributes are: - * - * - ::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS: - * - * A boolean attribute that can either be set (1) or unset (0). When set, - * the region of memory that \p ptr points to is guaranteed to always synchronize - * memory operations that are synchronous. If there are some previously initiated - * synchronous memory operations that are pending when this attribute is set, the - * function does not return until those memory operations are complete. - * See further documentation in the section titled "API synchronization behavior" - * to learn more about cases when synchronous memory operations can - * exhibit asynchronous behavior. - * \p value will be considered as a pointer to an unsigned integer to which this attribute is to be set. - * - * \param value - Pointer to memory containing the value to be set - * \param attribute - Pointer attribute to set - * \param ptr - Pointer to a memory region allocated using CUDA memory allocation APIs - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_DEVICE - * \notefnerr - * - * \sa ::cuPointerGetAttribute, - * ::cuPointerGetAttributes, - * ::cuMemAlloc, - * ::cuMemFree, - * ::cuMemAllocHost, - * ::cuMemFreeHost, - * ::cuMemHostAlloc, - * ::cuMemHostRegister, - * ::cuMemHostUnregister - */ -CUresult CUDAAPI cuPointerSetAttribute(const void *value, CUpointer_attribute attribute, CUdeviceptr ptr); - -/** - * \brief Returns information about a pointer. - * - * The supported attributes are (refer to ::cuPointerGetAttribute for attribute descriptions and restrictions): - * - * - ::CU_POINTER_ATTRIBUTE_CONTEXT - * - ::CU_POINTER_ATTRIBUTE_MEMORY_TYPE - * - ::CU_POINTER_ATTRIBUTE_DEVICE_POINTER - * - ::CU_POINTER_ATTRIBUTE_HOST_POINTER - * - ::CU_POINTER_ATTRIBUTE_SYNC_MEMOPS - * - ::CU_POINTER_ATTRIBUTE_BUFFER_ID - * - ::CU_POINTER_ATTRIBUTE_IS_MANAGED - * - ::CU_POINTER_ATTRIBUTE_DEVICE_ORDINAL - * - ::CU_POINTER_ATTRIBUTE_RANGE_START_ADDR - * - ::CU_POINTER_ATTRIBUTE_RANGE_SIZE - * - ::CU_POINTER_ATTRIBUTE_MAPPED - * - ::CU_POINTER_ATTRIBUTE_IS_LEGACY_CUDA_IPC_CAPABLE - * - ::CU_POINTER_ATTRIBUTE_ALLOWED_HANDLE_TYPES - * - ::CU_POINTER_ATTRIBUTE_MEMPOOL_HANDLE - * - * \param numAttributes - Number of attributes to query - * \param attributes - An array of attributes to query - * (numAttributes and the number of attributes in this array should match) - * \param data - A two-dimensional array containing pointers to memory - * locations where the result of each attribute query will be written to. 
- * \param ptr - Pointer to query - * - * Unlike ::cuPointerGetAttribute, this function will not return an error when the \p ptr - * encountered is not a valid CUDA pointer. Instead, the attributes are assigned default NULL values - * and CUDA_SUCCESS is returned. - * - * If \p ptr was not allocated by, mapped by, or registered with a ::CUcontext which uses UVA - * (Unified Virtual Addressing), ::CUDA_ERROR_INVALID_CONTEXT is returned. - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_DEVICE - * \notefnerr - * - * \sa - * ::cuPointerGetAttribute, - * ::cuPointerSetAttribute, - * ::cudaPointerGetAttributes - */ -CUresult CUDAAPI cuPointerGetAttributes(unsigned int numAttributes, CUpointer_attribute *attributes, void **data, CUdeviceptr ptr); - -/** @} */ /* END CUDA_UNIFIED */ - -/** - * \defgroup CUDA_STREAM Stream Management - * - * ___MANBRIEF___ stream management functions of the low-level CUDA driver API - * (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the stream management functions of the low-level CUDA - * driver application programming interface. - * - * @{ - */ - -/** - * \brief Create a stream - * - * Creates a stream and returns a handle in \p phStream. The \p Flags argument - * determines behaviors of the stream. - * - * Valid values for \p Flags are: - * - ::CU_STREAM_DEFAULT: Default stream creation flag. - * - ::CU_STREAM_NON_BLOCKING: Specifies that work running in the created - * stream may run concurrently with work in stream 0 (the NULL stream), and that - * the created stream should perform no implicit synchronization with stream 0. - * - * \param phStream - Returned newly created stream - * \param Flags - Parameters for stream creation - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_OUT_OF_MEMORY - * \notefnerr - * - * \sa ::cuStreamDestroy, - * ::cuStreamCreateWithPriority, - * ::cuStreamGetPriority, - * ::cuStreamGetFlags, - * ::cuStreamWaitEvent, - * ::cuStreamQuery, - * ::cuStreamSynchronize, - * ::cuStreamAddCallback, - * ::cudaStreamCreate, - * ::cudaStreamCreateWithFlags - */ -CUresult CUDAAPI cuStreamCreate(CUstream *phStream, unsigned int Flags); - -/** - * \brief Create a stream with the given priority - * - * Creates a stream with the specified priority and returns a handle in \p phStream. - * This API alters the scheduler priority of work in the stream. Work in a higher - * priority stream may preempt work already executing in a low priority stream. - * - * \p priority follows a convention where lower numbers represent higher priorities. - * '0' represents default priority. The range of meaningful numerical priorities can - * be queried using ::cuCtxGetStreamPriorityRange. If the specified priority is - * outside the numerical range returned by ::cuCtxGetStreamPriorityRange, - * it will automatically be clamped to the lowest or the highest number in the range. - * - * \param phStream - Returned newly created stream - * \param flags - Flags for stream creation. See ::cuStreamCreate for a list of - * valid flags - * \param priority - Stream priority. Lower numbers represent higher priorities. - * See ::cuCtxGetStreamPriorityRange for more information about - * meaningful stream priorities that can be passed. 
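Given the clamping behavior described for \p priority, a minimal sketch (current context assumed, names illustrative, error checking omitted) might be:

    /* Sketch only: create a non-blocking stream at the highest priority the
     * current context reports. */
    int least = 0, greatest = 0;
    cuCtxGetStreamPriorityRange(&least, &greatest); /* `greatest` is the numerically
                                                       lowest value, i.e. highest priority */
    CUstream hiPrio;
    cuStreamCreateWithPriority(&hiPrio, CU_STREAM_NON_BLOCKING, greatest);
    /* ... submit latency-sensitive work to `hiPrio` ... */
    cuStreamDestroy(hiPrio);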
- * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_OUT_OF_MEMORY - * \notefnerr - * - * \note Stream priorities are supported only on GPUs - * with compute capability 3.5 or higher. - * - * \note In the current implementation, only compute kernels launched in - * priority streams are affected by the stream's priority. Stream priorities have - * no effect on host-to-device and device-to-host memory operations. - * - * \sa ::cuStreamDestroy, - * ::cuStreamCreate, - * ::cuStreamGetPriority, - * ::cuCtxGetStreamPriorityRange, - * ::cuStreamGetFlags, - * ::cuStreamWaitEvent, - * ::cuStreamQuery, - * ::cuStreamSynchronize, - * ::cuStreamAddCallback, - * ::cudaStreamCreateWithPriority - */ -CUresult CUDAAPI cuStreamCreateWithPriority(CUstream *phStream, unsigned int flags, int priority); - - -/** - * \brief Query the priority of a given stream - * - * Query the priority of a stream created using ::cuStreamCreate or ::cuStreamCreateWithPriority - * and return the priority in \p priority. Note that if the stream was created with a - * priority outside the numerical range returned by ::cuCtxGetStreamPriorityRange, - * this function returns the clamped priority. - * See ::cuStreamCreateWithPriority for details about priority clamping. - * - * \param hStream - Handle to the stream to be queried - * \param priority - Pointer to a signed integer in which the stream's priority is returned - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_OUT_OF_MEMORY - * \notefnerr - * - * \sa ::cuStreamDestroy, - * ::cuStreamCreate, - * ::cuStreamCreateWithPriority, - * ::cuCtxGetStreamPriorityRange, - * ::cuStreamGetFlags, - * ::cudaStreamGetPriority - */ -CUresult CUDAAPI cuStreamGetPriority(CUstream hStream, int *priority); - -/** - * \brief Query the flags of a given stream - * - * Query the flags of a stream created using ::cuStreamCreate or ::cuStreamCreateWithPriority - * and return the flags in \p flags. - * - * \param hStream - Handle to the stream to be queried - * \param flags - Pointer to an unsigned integer in which the stream's flags are returned - * The value returned in \p flags is a logical 'OR' of all flags that - * were used while creating this stream. See ::cuStreamCreate for the list - * of valid flags - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_OUT_OF_MEMORY - * \notefnerr - * - * \sa ::cuStreamDestroy, - * ::cuStreamCreate, - * ::cuStreamGetPriority, - * ::cudaStreamGetFlags - */ -CUresult CUDAAPI cuStreamGetFlags(CUstream hStream, unsigned int *flags); - -/** - * \brief Query the context associated with a stream - * - * Returns the CUDA context that the stream is associated with. - * - * The stream handle \p hStream can refer to any of the following: - *
- * - a stream created via any of the CUDA driver APIs such as ::cuStreamCreate and ::cuStreamCreateWithPriority, or their runtime API equivalents such as ::cudaStreamCreate, ::cudaStreamCreateWithFlags and ::cudaStreamCreateWithPriority. The returned context is the context that was active in the calling thread when the stream was created. Passing an invalid handle will result in undefined behavior.
- * - any of the special streams such as the NULL stream, ::CU_STREAM_LEGACY and ::CU_STREAM_PER_THREAD. The runtime API equivalents of these are also accepted, which are NULL, ::cudaStreamLegacy and ::cudaStreamPerThread respectively. Specifying any of the special handles will return the context current to the calling thread. If no context is current to the calling thread, ::CUDA_ERROR_INVALID_CONTEXT is returned.
- * - * \param hStream - Handle to the stream to be queried - * \param pctx - Returned context associated with the stream - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_HANDLE, - * \notefnerr - * - * \sa ::cuStreamDestroy, - * ::cuStreamCreateWithPriority, - * ::cuStreamGetPriority, - * ::cuStreamGetFlags, - * ::cuStreamWaitEvent, - * ::cuStreamQuery, - * ::cuStreamSynchronize, - * ::cuStreamAddCallback, - * ::cudaStreamCreate, - * ::cudaStreamCreateWithFlags - */ -CUresult CUDAAPI cuStreamGetCtx(CUstream hStream, CUcontext *pctx); - -/** - * \brief Make a compute stream wait on an event - * - * Makes all future work submitted to \p hStream wait for all work captured in - * \p hEvent. See ::cuEventRecord() for details on what is captured by an event. - * The synchronization will be performed efficiently on the device when applicable. - * \p hEvent may be from a different context or device than \p hStream. - * - * flags include: - * - ::CU_EVENT_WAIT_DEFAULT: Default event creation flag. - * - ::CU_EVENT_WAIT_EXTERNAL: Event is captured in the graph as an external - * event node when performing stream capture. This flag is invalid outside - * of stream capture. - * - * \param hStream - Stream to wait - * \param hEvent - Event to wait on (may not be NULL) - * \param Flags - See ::CUevent_capture_flags - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_HANDLE, - * \note_null_stream - * \notefnerr - * - * \sa ::cuStreamCreate, - * ::cuEventRecord, - * ::cuStreamQuery, - * ::cuStreamSynchronize, - * ::cuStreamAddCallback, - * ::cuStreamDestroy, - * ::cudaStreamWaitEvent - */ -CUresult CUDAAPI cuStreamWaitEvent(CUstream hStream, CUevent hEvent, unsigned int Flags); - -/** - * \brief Add a callback to a compute stream - * - * \note This function is slated for eventual deprecation and removal. If - * you do not require the callback to execute in case of a device error, - * consider using ::cuLaunchHostFunc. Additionally, this function is not - * supported with ::cuStreamBeginCapture and ::cuStreamEndCapture, unlike - * ::cuLaunchHostFunc. - * - * Adds a callback to be called on the host after all currently enqueued - * items in the stream have completed. For each - * cuStreamAddCallback call, the callback will be executed exactly once. - * The callback will block later work in the stream until it is finished. - * - * The callback may be passed ::CUDA_SUCCESS or an error code. In the event - * of a device error, all subsequently executed callbacks will receive an - * appropriate ::CUresult. - * - * Callbacks must not make any CUDA API calls. Attempting to use a CUDA API - * will result in ::CUDA_ERROR_NOT_PERMITTED. Callbacks must not perform any - * synchronization that may depend on outstanding device work or other callbacks - * that are not mandated to run earlier. Callbacks without a mandated order - * (in independent streams) execute in undefined order and may be serialized. - * - * For the purposes of Unified Memory, callback execution makes a number of - * guarantees: - *
- * - The callback stream is considered idle for the duration of the callback. Thus, for example, a callback may always use memory attached to the callback stream.
- * - The start of execution of a callback has the same effect as synchronizing an event recorded in the same stream immediately prior to the callback. It thus synchronizes streams which have been "joined" prior to the callback.
- * - Adding device work to any stream does not have the effect of making the stream active until all preceding host functions and stream callbacks have executed. Thus, for example, a callback might use global attached memory even if work has been added to another stream, if the work has been ordered behind the callback with an event.
- * - Completion of a callback does not cause a stream to become active except as described above. The callback stream will remain idle if no device work follows the callback, and will remain idle across consecutive callbacks without device work in between. Thus, for example, stream synchronization can be done by signaling from a callback at the end of the stream.
- * - * \param hStream - Stream to add callback to - * \param callback - The function to call once preceding stream operations are complete - * \param userData - User specified data to be passed to the callback function - * \param flags - Reserved for future use, must be 0 - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_NOT_SUPPORTED - * \note_null_stream - * \notefnerr - * - * \sa ::cuStreamCreate, - * ::cuStreamQuery, - * ::cuStreamSynchronize, - * ::cuStreamWaitEvent, - * ::cuStreamDestroy, - * ::cuMemAllocManaged, - * ::cuStreamAttachMemAsync, - * ::cuStreamLaunchHostFunc, - * ::cudaStreamAddCallback - */ -CUresult CUDAAPI cuStreamAddCallback(CUstream hStream, CUstreamCallback callback, void *userData, unsigned int flags); - -/** - * \brief Begins graph capture on a stream - * - * Begin graph capture on \p hStream. When a stream is in capture mode, all operations - * pushed into the stream will not be executed, but will instead be captured into - * a graph, which will be returned via ::cuStreamEndCapture. Capture may not be initiated - * if \p stream is CU_STREAM_LEGACY. Capture must be ended on the same stream in which - * it was initiated, and it may only be initiated if the stream is not already in capture - * mode. The capture mode may be queried via ::cuStreamIsCapturing. A unique id - * representing the capture sequence may be queried via ::cuStreamGetCaptureInfo. - * - * If \p mode is not ::CU_STREAM_CAPTURE_MODE_RELAXED, ::cuStreamEndCapture must be - * called on this stream from the same thread. - * - * \param hStream - Stream in which to initiate capture - * \param mode - Controls the interaction of this capture sequence with other API - * calls that are potentially unsafe. For more details see - * ::cuThreadExchangeStreamCaptureMode. - * - * \note Kernels captured using this API must not use texture and surface references. - * Reading or writing through any texture or surface reference is undefined - * behavior. This restriction does not apply to texture and surface objects. - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa - * ::cuStreamCreate, - * ::cuStreamIsCapturing, - * ::cuStreamEndCapture, - * ::cuThreadExchangeStreamCaptureMode - */ -CUresult CUDAAPI cuStreamBeginCapture(CUstream hStream, CUstreamCaptureMode mode); - -/** - * \brief Swaps the stream capture interaction mode for a thread - * - * Sets the calling thread's stream capture interaction mode to the value contained - * in \p *mode, and overwrites \p *mode with the previous mode for the thread. To - * facilitate deterministic behavior across function or module boundaries, callers - * are encouraged to use this API in a push-pop fashion: \code - CUstreamCaptureMode mode = desiredMode; - cuThreadExchangeStreamCaptureMode(&mode); - ... - cuThreadExchangeStreamCaptureMode(&mode); // restore previous mode - * \endcode - * - * During stream capture (see ::cuStreamBeginCapture), some actions, such as a call - * to ::cudaMalloc, may be unsafe. In the case of ::cudaMalloc, the operation is - * not enqueued asynchronously to a stream, and is not observed by stream capture. - * Therefore, if the sequence of operations captured via ::cuStreamBeginCapture - * depended on the allocation being replayed whenever the graph is launched, the - * captured graph would be invalid. 
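Putting ::cuStreamBeginCapture, ::cuStreamEndCapture and the push-pop idiom above together, a capture sequence might look like the following sketch (`stream` is assumed to be a non-default stream; error checking omitted):

    /* Sketch only: capture asynchronous work from `stream` into a graph.
     * THREAD_LOCAL limits the unsafe-call restrictions to this thread. */
    CUstreamCaptureMode mode = CU_STREAM_CAPTURE_MODE_THREAD_LOCAL;
    cuThreadExchangeStreamCaptureMode(&mode);   /* push the desired mode */

    CUgraph graph = NULL;
    cuStreamBeginCapture(stream, CU_STREAM_CAPTURE_MODE_THREAD_LOCAL);
    /* ... enqueue kernels and async copies into `stream`; nothing executes yet ... */
    cuStreamEndCapture(stream, &graph);         /* NULL if capture was invalidated */

    cuThreadExchangeStreamCaptureMode(&mode);   /* pop: restore the previous mode */
    /* `graph` can now be instantiated (::cuGraphInstantiate) and launched
     * repeatedly, and is released with cuGraphDestroy(graph) when done. */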
- * - * Therefore, stream capture places restrictions on API calls that can be made within - * or concurrently to a ::cuStreamBeginCapture-::cuStreamEndCapture sequence. This - * behavior can be controlled via this API and flags to ::cuStreamBeginCapture. - * - * A thread's mode is one of the following: - * - \p CU_STREAM_CAPTURE_MODE_GLOBAL: This is the default mode. If the local thread has - * an ongoing capture sequence that was not initiated with - * \p CU_STREAM_CAPTURE_MODE_RELAXED at \p cuStreamBeginCapture, or if any other thread - * has a concurrent capture sequence initiated with \p CU_STREAM_CAPTURE_MODE_GLOBAL, - * this thread is prohibited from potentially unsafe API calls. - * - \p CU_STREAM_CAPTURE_MODE_THREAD_LOCAL: If the local thread has an ongoing capture - * sequence not initiated with \p CU_STREAM_CAPTURE_MODE_RELAXED, it is prohibited - * from potentially unsafe API calls. Concurrent capture sequences in other threads - * are ignored. - * - \p CU_STREAM_CAPTURE_MODE_RELAXED: The local thread is not prohibited from potentially - * unsafe API calls. Note that the thread is still prohibited from API calls which - * necessarily conflict with stream capture, for example, attempting ::cuEventQuery - * on an event that was last recorded inside a capture sequence. - * - * \param mode - Pointer to mode value to swap with the current mode - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa - * ::cuStreamBeginCapture - */ -CUresult CUDAAPI cuThreadExchangeStreamCaptureMode(CUstreamCaptureMode *mode); - -/** - * \brief Ends capture on a stream, returning the captured graph - * - * End capture on \p hStream, returning the captured graph via \p phGraph. - * Capture must have been initiated on \p hStream via a call to ::cuStreamBeginCapture. - * If capture was invalidated, due to a violation of the rules of stream capture, then - * a NULL graph will be returned. - * - * If the \p mode argument to ::cuStreamBeginCapture was not - * ::CU_STREAM_CAPTURE_MODE_RELAXED, this call must be from the same thread as - * ::cuStreamBeginCapture. - * - * \param hStream - Stream to query - * \param phGraph - The captured graph - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD - * \notefnerr - * - * \sa - * ::cuStreamCreate, - * ::cuStreamBeginCapture, - * ::cuStreamIsCapturing - */ -CUresult CUDAAPI cuStreamEndCapture(CUstream hStream, CUgraph *phGraph); - -/** - * \brief Returns a stream's capture status - * - * Return the capture status of \p hStream via \p captureStatus. After a successful - * call, \p *captureStatus will contain one of the following: - * - ::CU_STREAM_CAPTURE_STATUS_NONE: The stream is not capturing. - * - ::CU_STREAM_CAPTURE_STATUS_ACTIVE: The stream is capturing. - * - ::CU_STREAM_CAPTURE_STATUS_INVALIDATED: The stream was capturing but an error - * has invalidated the capture sequence. The capture sequence must be terminated - * with ::cuStreamEndCapture on the stream where it was initiated in order to - * continue using \p hStream. - * - * Note that, if this is called on ::CU_STREAM_LEGACY (the "null stream") while - * a blocking stream in the same context is capturing, it will return - * ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT and \p *captureStatus is unspecified - * after the call. The blocking stream capture is not invalidated. 
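In code, this status check might be used to guard a blocking call, along the lines of (names illustrative, error checking omitted):

    /* Sketch only: avoid a blocking synchronization while `stream` is part of
     * an active capture sequence. */
    CUstreamCaptureStatus status;
    cuStreamIsCapturing(stream, &status);
    if (status == CU_STREAM_CAPTURE_STATUS_NONE) {
        cuStreamSynchronize(stream);  /* safe: `stream` is not capturing */
    }
    /* For _ACTIVE or _INVALIDATED, defer the blocking call and let the owning
     * code terminate the sequence with ::cuStreamEndCapture first. */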
- * - * When a blocking stream is capturing, the legacy stream is in an - * unusable state until the blocking stream capture is terminated. The legacy - * stream is not supported for stream capture, but attempted use would have an - * implicit dependency on the capturing stream(s). - * - * \param hStream - Stream to query - * \param captureStatus - Returns the stream's capture status - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT - * \notefnerr - * - * \sa - * ::cuStreamCreate, - * ::cuStreamBeginCapture, - * ::cuStreamEndCapture - */ -CUresult CUDAAPI cuStreamIsCapturing(CUstream hStream, CUstreamCaptureStatus *captureStatus); - -/** - * \brief Query capture status of a stream - * - * Note there is a later version of this API, ::cuStreamGetCaptureInfo_v2. It will - * supplant this version in 12.0, which is retained for minor version compatibility. - * - * Query the capture status of a stream and and get an id for - * the capture sequence, which is unique over the lifetime of the process. - * - * If called on ::CU_STREAM_LEGACY (the "null stream") while a stream not created - * with ::CU_STREAM_NON_BLOCKING is capturing, returns ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT. - * - * A valid id is returned only if both of the following are true: - * - the call returns CUDA_SUCCESS - * - captureStatus is set to ::CU_STREAM_CAPTURE_STATUS_ACTIVE - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT - * \notefnerr - * - * \sa - * ::cuStreamGetCaptureInfo_v2, - * ::cuStreamBeginCapture, - * ::cuStreamIsCapturing - */ -CUresult CUDAAPI cuStreamGetCaptureInfo(CUstream hStream, CUstreamCaptureStatus *captureStatus_out, cuuint64_t *id_out); - -/** - * \brief Query a stream's capture state (11.3+) - * - * Query stream state related to stream capture. - * - * If called on ::CU_STREAM_LEGACY (the "null stream") while a stream not created - * with ::CU_STREAM_NON_BLOCKING is capturing, returns ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT. - * - * Valid data (other than capture status) is returned only if both of the following are true: - * - the call returns CUDA_SUCCESS - * - the returned capture status is ::CU_STREAM_CAPTURE_STATUS_ACTIVE - * - * This version of cuStreamGetCaptureInfo is introduced in CUDA 11.3 and will supplant the - * previous version in 12.0. Developers requiring compatibility across minor versions to - * CUDA 11.0 (driver version 445) should use ::cuStreamGetCaptureInfo or include a fallback - * path. - * - * \param hStream - The stream to query - * \param captureStatus_out - Location to return the capture status of the stream; required - * \param id_out - Optional location to return an id for the capture sequence, which is - * unique over the lifetime of the process - * \param graph_out - Optional location to return the graph being captured into. All - * operations other than destroy and node removal are permitted on the graph - * while the capture sequence is in progress. This API does not transfer - * ownership of the graph, which is transferred or destroyed at - * ::cuStreamEndCapture. Note that the graph handle may be invalidated before - * end of capture for certain errors. Nodes that are or become - * unreachable from the original stream at ::cuStreamEndCapture due to direct - * actions on the graph do not trigger ::CUDA_ERROR_STREAM_CAPTURE_UNJOINED. - * \param dependencies_out - Optional location to store a pointer to an array of nodes. 
- * The next node to be captured in the stream will depend on this set of nodes, - * absent operations such as event wait which modify this set. The array pointer - * is valid until the next API call which operates on the stream or until end of - * capture. The node handles may be copied out and are valid until they or the - * graph is destroyed. The driver-owned array may also be passed directly to - * APIs that operate on the graph (not the stream) without copying. - * \param numDependencies_out - Optional location to store the size of the array - * returned in dependencies_out. - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_STREAM_CAPTURE_IMPLICIT - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuStreamGetCaptureInfo, - * ::cuStreamBeginCapture, - * ::cuStreamIsCapturing, - * ::cuStreamUpdateCaptureDependencies - */ -CUresult CUDAAPI cuStreamGetCaptureInfo_v2(CUstream hStream, CUstreamCaptureStatus *captureStatus_out, - cuuint64_t *id_out, CUgraph *graph_out, const CUgraphNode **dependencies_out, size_t *numDependencies_out); - -/** - * \brief Update the set of dependencies in a capturing stream (11.3+) - * - * Modifies the dependency set of a capturing stream. The dependency set is the set - * of nodes that the next captured node in the stream will depend on. - * - * Valid flags are ::CU_STREAM_ADD_CAPTURE_DEPENDENCIES and - * ::CU_STREAM_SET_CAPTURE_DEPENDENCIES. These control whether the set passed to - * the API is added to the existing set or replaces it. A flags value of 0 defaults - * to ::CU_STREAM_ADD_CAPTURE_DEPENDENCIES. - * - * Nodes that are removed from the dependency set via this API do not result in - * ::CUDA_ERROR_STREAM_CAPTURE_UNJOINED if they are unreachable from the stream at - * ::cuStreamEndCapture. - * - * Returns ::CUDA_ERROR_ILLEGAL_STATE if the stream is not capturing. - * - * This API is new in CUDA 11.3. Developers requiring compatibility across minor - * versions to CUDA 11.0 should not use this API or provide a fallback. - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_ILLEGAL_STATE - * - * \sa - * ::cuStreamBeginCapture, - * ::cuStreamGetCaptureInfo, - * ::cuStreamGetCaptureInfo_v2 - */ -CUresult CUDAAPI cuStreamUpdateCaptureDependencies(CUstream hStream, CUgraphNode *dependencies, size_t numDependencies, unsigned int flags); - -/** - * \brief Attach memory to a stream asynchronously - * - * Enqueues an operation in \p hStream to specify stream association of - * \p length bytes of memory starting from \p dptr. This function is a - * stream-ordered operation, meaning that it is dependent on, and will - * only take effect when, previous work in stream has completed. Any - * previous association is automatically replaced. - * - * \p dptr must point to one of the following types of memories: - * - managed memory declared using the __managed__ keyword or allocated with - * ::cuMemAllocManaged. - * - a valid host-accessible region of system-allocated pageable memory. This - * type of memory may only be specified if the device associated with the - * stream reports a non-zero value for the device attribute - * ::CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS. - * - * For managed allocations, \p length must be either zero or the entire - * allocation's size. Both indicate that the entire allocation's stream - * association is being changed. Currently, it is not possible to change stream - * association for a portion of a managed allocation. 
- * - * For pageable host allocations, \p length must be non-zero. - * - * The stream association is specified using \p flags which must be - * one of ::CUmemAttach_flags. - * If the ::CU_MEM_ATTACH_GLOBAL flag is specified, the memory can be accessed - * by any stream on any device. - * If the ::CU_MEM_ATTACH_HOST flag is specified, the program makes a guarantee - * that it won't access the memory on the device from any stream on a device that - * has a zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. - * If the ::CU_MEM_ATTACH_SINGLE flag is specified and \p hStream is associated with - * a device that has a zero value for the device attribute ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS, - * the program makes a guarantee that it will only access the memory on the device - * from \p hStream. It is illegal to attach singly to the NULL stream, because the - * NULL stream is a virtual global stream and not a specific stream. An error will - * be returned in this case. - * - * When memory is associated with a single stream, the Unified Memory system will - * allow CPU access to this memory region so long as all operations in \p hStream - * have completed, regardless of whether other streams are active. In effect, - * this constrains exclusive ownership of the managed memory region by - * an active GPU to per-stream activity instead of whole-GPU activity. - * - * Accessing memory on the device from streams that are not associated with - * it will produce undefined results. No error checking is performed by the - * Unified Memory system to ensure that kernels launched into other streams - * do not access this region. - * - * It is a program's responsibility to order calls to ::cuStreamAttachMemAsync - * via events, synchronization or other means to ensure legal access to memory - * at all times. Data visibility and coherency will be changed appropriately - * for all kernels which follow a stream-association change. - * - * If \p hStream is destroyed while data is associated with it, the association is - * removed and the association reverts to the default visibility of the allocation - * as specified at ::cuMemAllocManaged. For __managed__ variables, the default - * association is always ::CU_MEM_ATTACH_GLOBAL. Note that destroying a stream is an - * asynchronous operation, and as a result, the change to default association won't - * happen until all work in the stream has completed. - * - * \param hStream - Stream in which to enqueue the attach operation - * \param dptr - Pointer to memory (must be a pointer to managed memory or - * to a valid host-accessible region of system-allocated - * pageable memory) - * \param length - Length of memory - * \param flags - Must be one of ::CUmemAttach_flags - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_NOT_SUPPORTED - * \note_null_stream - * \notefnerr - * - * \sa ::cuStreamCreate, - * ::cuStreamQuery, - * ::cuStreamSynchronize, - * ::cuStreamWaitEvent, - * ::cuStreamDestroy, - * ::cuMemAllocManaged, - * ::cudaStreamAttachMemAsync - */ -CUresult CUDAAPI cuStreamAttachMemAsync(CUstream hStream, CUdeviceptr dptr, size_t length, unsigned int flags); - -/** - * \brief Determine status of a compute stream - * - * Returns ::CUDA_SUCCESS if all operations in the stream specified by - * \p hStream have completed, or ::CUDA_ERROR_NOT_READY if not. 
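Tying ::cuStreamAttachMemAsync above to this non-blocking query, a sketch might look as follows (current context assumed, names illustrative, error checking omitted):

    /* Sketch only: associate a managed buffer with a single stream, then poll
     * for completion before the CPU touches it. */
    CUdeviceptr managed;
    size_t bytes = 1 << 20;                                  /* arbitrary size */
    cuMemAllocManaged(&managed, bytes, CU_MEM_ATTACH_HOST);
    cuStreamAttachMemAsync(stream, managed, 0, CU_MEM_ATTACH_SINGLE); /* 0 = whole allocation */
    /* ... launch kernels that access `managed` from `stream` only ... */
    if (cuStreamQuery(stream) == CUDA_SUCCESS) {
        /* all work in `stream` has completed; host access to `managed` is coherent */
    }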
- * - * For the purposes of Unified Memory, a return value of ::CUDA_SUCCESS - * is equivalent to having called ::cuStreamSynchronize(). - * - * \param hStream - Stream to query status of - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_NOT_READY - * \note_null_stream - * \notefnerr - * - * \sa ::cuStreamCreate, - * ::cuStreamWaitEvent, - * ::cuStreamDestroy, - * ::cuStreamSynchronize, - * ::cuStreamAddCallback, - * ::cudaStreamQuery - */ -CUresult CUDAAPI cuStreamQuery(CUstream hStream); - -/** - * \brief Wait until a stream's tasks are completed - * - * Waits until the device has completed all operations in the stream specified - * by \p hStream. If the context was created with the - * ::CU_CTX_SCHED_BLOCKING_SYNC flag, the CPU thread will block until the - * stream is finished with all of its tasks. - * - * \param hStream - Stream to wait for - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_HANDLE - - * \note_null_stream - * \notefnerr - * - * \sa ::cuStreamCreate, - * ::cuStreamDestroy, - * ::cuStreamWaitEvent, - * ::cuStreamQuery, - * ::cuStreamAddCallback, - * ::cudaStreamSynchronize - */ -CUresult CUDAAPI cuStreamSynchronize(CUstream hStream); - -/** - * \brief Destroys a stream - * - * Destroys the stream specified by \p hStream. - * - * In case the device is still doing work in the stream \p hStream - * when ::cuStreamDestroy() is called, the function will return immediately - * and the resources associated with \p hStream will be released automatically - * once the device has completed all work in \p hStream. - * - * \param hStream - Stream to destroy - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE - * \notefnerr - * - * \sa ::cuStreamCreate, - * ::cuStreamWaitEvent, - * ::cuStreamQuery, - * ::cuStreamSynchronize, - * ::cuStreamAddCallback, - * ::cudaStreamDestroy - */ -CUresult CUDAAPI cuStreamDestroy(CUstream hStream); - -/** - * \brief Copies attributes from source stream to destination stream. - * - * Copies attributes from source stream \p src to destination stream \p dst. - * Both streams must have the same context. - * - * \param[out] dst Destination stream - * \param[in] src Source stream - * For list of attributes see ::CUstreamAttrID - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa - * ::CUaccessPolicyWindow - */ -CUresult CUDAAPI cuStreamCopyAttributes(CUstream dst, CUstream src); - -/** - * \brief Queries stream attribute. - * - * Queries attribute \p attr from \p hStream and stores it in corresponding - * member of \p value_out. - * - * \param[in] hStream - * \param[in] attr - * \param[out] value_out - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE - * \notefnerr - * - * \sa - * ::CUaccessPolicyWindow - */ -CUresult CUDAAPI cuStreamGetAttribute(CUstream hStream, CUstreamAttrID attr, - CUstreamAttrValue *value_out); - -/** - * \brief Sets stream attribute. - * - * Sets attribute \p attr on \p hStream from corresponding attribute of - * \p value. The updated attribute will be applied to subsequent work - * submitted to the stream. It will not affect previously submitted work. 
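A sketch of setting one such attribute, assuming the ::CUaccessPolicyWindow layout declared elsewhere in this header (`stream`, `devBuf` and `bytes` are illustrative; memset and uintptr_t come from <string.h>/<stdint.h>; error checking omitted):

    /* Sketch only: mark a window of `devBuf` as persisting in L2 for work
     * submitted to `stream` after this call (field names assumed). */
    CUstreamAttrValue attr;
    memset(&attr, 0, sizeof(attr));
    attr.accessPolicyWindow.base_ptr  = (void *)(uintptr_t)devBuf;
    attr.accessPolicyWindow.num_bytes = bytes;
    attr.accessPolicyWindow.hitRatio  = 0.6f;   /* fraction of accesses given hitProp */
    attr.accessPolicyWindow.hitProp   = CU_ACCESS_PROPERTY_PERSISTING;
    attr.accessPolicyWindow.missProp  = CU_ACCESS_PROPERTY_STREAMING;
    cuStreamSetAttribute(stream, CU_STREAM_ATTRIBUTE_ACCESS_POLICY_WINDOW, &attr);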
- * - * \param[out] hStream - * \param[in] attr - * \param[in] value - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE - * \notefnerr - * - * \sa - * ::CUaccessPolicyWindow - */ -CUresult CUDAAPI cuStreamSetAttribute(CUstream hStream, CUstreamAttrID attr, - const CUstreamAttrValue *value); - -/** @} */ /* END CUDA_STREAM */ - - -/** - * \defgroup CUDA_EVENT Event Management - * - * ___MANBRIEF___ event management functions of the low-level CUDA driver API - * (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the event management functions of the low-level CUDA - * driver application programming interface. - * - * @{ - */ - -/** - * \brief Creates an event - * - * Creates an event *phEvent for the current context with the flags specified via - * \p Flags. Valid flags include: - * - ::CU_EVENT_DEFAULT: Default event creation flag. - * - ::CU_EVENT_BLOCKING_SYNC: Specifies that the created event should use blocking - * synchronization. A CPU thread that uses ::cuEventSynchronize() to wait on - * an event created with this flag will block until the event has actually - * been recorded. - * - ::CU_EVENT_DISABLE_TIMING: Specifies that the created event does not need - * to record timing data. Events created with this flag specified and - * the ::CU_EVENT_BLOCKING_SYNC flag not specified will provide the best - * performance when used with ::cuStreamWaitEvent() and ::cuEventQuery(). - * - ::CU_EVENT_INTERPROCESS: Specifies that the created event may be used as an - * interprocess event by ::cuIpcGetEventHandle(). ::CU_EVENT_INTERPROCESS must - * be specified along with ::CU_EVENT_DISABLE_TIMING. - * - * \param phEvent - Returns newly created event - * \param Flags - Event creation flags - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_OUT_OF_MEMORY - * \notefnerr - * - * \sa - * ::cuEventRecord, - * ::cuEventQuery, - * ::cuEventSynchronize, - * ::cuEventDestroy, - * ::cuEventElapsedTime, - * ::cudaEventCreate, - * ::cudaEventCreateWithFlags - */ -CUresult CUDAAPI cuEventCreate(CUevent *phEvent, unsigned int Flags); - -/** - * \brief Records an event - * - * Captures in \p hEvent the contents of \p hStream at the time of this call. - * \p hEvent and \p hStream must be from the same context. - * Calls such as ::cuEventQuery() or ::cuStreamWaitEvent() will then - * examine or wait for completion of the work that was captured. Uses of - * \p hStream after this call do not modify \p hEvent. See note on default - * stream behavior for what is captured in the default case. - * - * ::cuEventRecord() can be called multiple times on the same event and - * will overwrite the previously captured state. Other APIs such as - * ::cuStreamWaitEvent() use the most recently captured state at the time - * of the API call, and are not affected by later calls to - * ::cuEventRecord(). Before the first call to ::cuEventRecord(), an - * event represents an empty set of work, so for example ::cuEventQuery() - * would return ::CUDA_SUCCESS. 
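A small sketch of the record/wait pattern built on ::cuEventRecord and the earlier ::cuStreamWaitEvent (`producer` and `consumer` are illustrative streams; error checking omitted):

    /* Sketch only: order work in `consumer` after work already recorded in
     * `producer`, without blocking the host. */
    CUevent done;
    cuEventCreate(&done, CU_EVENT_DISABLE_TIMING);  /* no timing needed here */
    /* ... enqueue producer-side work into `producer` ... */
    cuEventRecord(done, producer);                  /* capture producer's work so far */
    cuStreamWaitEvent(consumer, done, CU_EVENT_WAIT_DEFAULT);
    /* ... work enqueued into `consumer` from here on waits for `done` ... */
    cuEventDestroy(done);                           /* resources released once complete */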
- * - * \param hEvent - Event to record - * \param hStream - Stream to record event for - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_INVALID_VALUE - * \note_null_stream - * \notefnerr - * - * \sa ::cuEventCreate, - * ::cuEventQuery, - * ::cuEventSynchronize, - * ::cuStreamWaitEvent, - * ::cuEventDestroy, - * ::cuEventElapsedTime, - * ::cudaEventRecord, - * ::cuEventRecordWithFlags - */ -CUresult CUDAAPI cuEventRecord(CUevent hEvent, CUstream hStream); - -/** - * \brief Records an event - * - * Captures in \p hEvent the contents of \p hStream at the time of this call. - * \p hEvent and \p hStream must be from the same context. - * Calls such as ::cuEventQuery() or ::cuStreamWaitEvent() will then - * examine or wait for completion of the work that was captured. Uses of - * \p hStream after this call do not modify \p hEvent. See note on default - * stream behavior for what is captured in the default case. - * - * ::cuEventRecordWithFlags() can be called multiple times on the same event and - * will overwrite the previously captured state. Other APIs such as - * ::cuStreamWaitEvent() use the most recently captured state at the time - * of the API call, and are not affected by later calls to - * ::cuEventRecordWithFlags(). Before the first call to ::cuEventRecordWithFlags(), an - * event represents an empty set of work, so for example ::cuEventQuery() - * would return ::CUDA_SUCCESS. - * - * flags include: - * - ::CU_EVENT_RECORD_DEFAULT: Default event creation flag. - * - ::CU_EVENT_RECORD_EXTERNAL: Event is captured in the graph as an external - * event node when performing stream capture. This flag is invalid outside - * of stream capture. - * - * \param hEvent - Event to record - * \param hStream - Stream to record event for - * \param flags - See ::CUevent_capture_flags - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_INVALID_VALUE - * \note_null_stream - * \notefnerr - * - * \sa ::cuEventCreate, - * ::cuEventQuery, - * ::cuEventSynchronize, - * ::cuStreamWaitEvent, - * ::cuEventDestroy, - * ::cuEventElapsedTime, - * ::cuEventRecord, - * ::cudaEventRecord - */ -CUresult CUDAAPI cuEventRecordWithFlags(CUevent hEvent, CUstream hStream, unsigned int flags); - -/** - * \brief Queries an event's status - * - * Queries the status of all work currently captured by \p hEvent. See - * ::cuEventRecord() for details on what is captured by an event. - * - * Returns ::CUDA_SUCCESS if all captured work has been completed, or - * ::CUDA_ERROR_NOT_READY if any captured work is incomplete. - * - * For the purposes of Unified Memory, a return value of ::CUDA_SUCCESS - * is equivalent to having called ::cuEventSynchronize(). - * - * \param hEvent - Event to query - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_NOT_READY - * \notefnerr - * - * \sa ::cuEventCreate, - * ::cuEventRecord, - * ::cuEventSynchronize, - * ::cuEventDestroy, - * ::cuEventElapsedTime, - * ::cudaEventQuery - */ -CUresult CUDAAPI cuEventQuery(CUevent hEvent); - -/** - * \brief Waits for an event to complete - * - * Waits until the completion of all work currently captured in \p hEvent. 
- * See ::cuEventRecord() for details on what is captured by an event. - * - * Waiting for an event that was created with the ::CU_EVENT_BLOCKING_SYNC - * flag will cause the calling CPU thread to block until the event has - * been completed by the device. If the ::CU_EVENT_BLOCKING_SYNC flag has - * not been set, then the CPU thread will busy-wait until the event has - * been completed by the device. - * - * \param hEvent - Event to wait for - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_HANDLE - * \notefnerr - * - * \sa ::cuEventCreate, - * ::cuEventRecord, - * ::cuEventQuery, - * ::cuEventDestroy, - * ::cuEventElapsedTime, - * ::cudaEventSynchronize - */ -CUresult CUDAAPI cuEventSynchronize(CUevent hEvent); - -/** - * \brief Destroys an event - * - * Destroys the event specified by \p hEvent. - * - * An event may be destroyed before it is complete (i.e., while - * ::cuEventQuery() would return ::CUDA_ERROR_NOT_READY). In this case, the - * call does not block on completion of the event, and any associated - * resources will automatically be released asynchronously at completion. - * - * \param hEvent - Event to destroy - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_HANDLE - * \notefnerr - * - * \sa ::cuEventCreate, - * ::cuEventRecord, - * ::cuEventQuery, - * ::cuEventSynchronize, - * ::cuEventElapsedTime, - * ::cudaEventDestroy - */ -CUresult CUDAAPI cuEventDestroy(CUevent hEvent); - -/** - * \brief Computes the elapsed time between two events - * - * Computes the elapsed time between two events (in milliseconds with a - * resolution of around 0.5 microseconds). - * - * If either event was last recorded in a non-NULL stream, the resulting time - * may be greater than expected (even if both used the same stream handle). This - * happens because the ::cuEventRecord() operation takes place asynchronously - * and there is no guarantee that the measured latency is actually just between - * the two events. Any number of other different stream operations could execute - * in between the two measured events, thus altering the timing in a significant - * way. - * - * If ::cuEventRecord() has not been called on either event then - * ::CUDA_ERROR_INVALID_HANDLE is returned. If ::cuEventRecord() has been called - * on both events but one or both of them has not yet been completed (that is, - * ::cuEventQuery() would return ::CUDA_ERROR_NOT_READY on at least one of the - * events), ::CUDA_ERROR_NOT_READY is returned. If either event was created with - * the ::CU_EVENT_DISABLE_TIMING flag, then this function will return - * ::CUDA_ERROR_INVALID_HANDLE. 
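Given these caveats, a minimal timing sketch that records both events in the same stream (names illustrative, error checking omitted):

    /* Sketch only: time a stretch of work submitted to `stream`. */
    CUevent start, stop;
    float ms = 0.0f;
    cuEventCreate(&start, CU_EVENT_DEFAULT);  /* timing requires events created
                                                 without CU_EVENT_DISABLE_TIMING */
    cuEventCreate(&stop, CU_EVENT_DEFAULT);
    cuEventRecord(start, stream);
    /* ... enqueue the work to be timed into `stream` ... */
    cuEventRecord(stop, stream);
    cuEventSynchronize(stop);                 /* both events must have completed */
    cuEventElapsedTime(&ms, start, stop);
    cuEventDestroy(start);
    cuEventDestroy(stop);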
- * - * \param pMilliseconds - Time between \p hStart and \p hEnd in ms - * \param hStart - Starting event - * \param hEnd - Ending event - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_NOT_READY - * \notefnerr - * - * \sa ::cuEventCreate, - * ::cuEventRecord, - * ::cuEventQuery, - * ::cuEventSynchronize, - * ::cuEventDestroy, - * ::cudaEventElapsedTime - */ -CUresult CUDAAPI cuEventElapsedTime(float *pMilliseconds, CUevent hStart, CUevent hEnd); - -/** @} */ /* END CUDA_EVENT */ - -/** - * \defgroup CUDA_EXTRES_INTEROP External Resource Interoperability - * - * ___MANBRIEF___ External resource interoperability functions of the low-level CUDA driver API - * (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the external resource interoperability functions of the low-level CUDA - * driver application programming interface. - * - * @{ - */ - - /** - * \brief Imports an external memory object - * - * Imports an externally allocated memory object and returns - * a handle to that in \p extMem_out. - * - * The properties of the handle being imported must be described in - * \p memHandleDesc. The ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC structure - * is defined as follows: - * - * \code - typedef struct CUDA_EXTERNAL_MEMORY_HANDLE_DESC_st { - CUexternalMemoryHandleType type; - union { - int fd; - struct { - void *handle; - const void *name; - } win32; - const void *nvSciBufObject; - } handle; - unsigned long long size; - unsigned int flags; - } CUDA_EXTERNAL_MEMORY_HANDLE_DESC; - * \endcode - * - * where ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type specifies the type - * of handle being imported. ::CUexternalMemoryHandleType is - * defined as: - * - * \code - typedef enum CUexternalMemoryHandleType_enum { - CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD = 1, - CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32 = 2, - CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT = 3, - CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP = 4, - CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE = 5, - CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE = 6, - CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT = 7, - CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF = 8 - } CUexternalMemoryHandleType; - * \endcode - * - * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is - * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD, then - * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::fd must be a valid - * file descriptor referencing a memory object. Ownership of - * the file descriptor is transferred to the CUDA driver when the - * handle is imported successfully. Performing any operations on the - * file descriptor after it is imported results in undefined behavior. - * - * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is - * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32, then exactly one - * of ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle and - * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must not be - * NULL. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle - * is not NULL, then it must represent a valid shared NT handle that - * references a memory object. Ownership of this handle is - * not transferred to CUDA after the import operation, so the - * application must release the handle using the appropriate system - * call. 
If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name - * is not NULL, then it must point to a NULL-terminated array of - * UTF-16 characters that refers to a memory object. - * - * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is - * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT, then - * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle must - * be non-NULL and - * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name - * must be NULL. The handle specified must be a globally shared KMT - * handle. This handle does not hold a reference to the underlying - * object, and thus will be invalid when all references to the - * memory object are destroyed. - * - * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is - * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP, then exactly one - * of ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle and - * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must not be - * NULL. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle - * is not NULL, then it must represent a valid shared NT handle that - * is returned by ID3D12Device::CreateSharedHandle when referring to a - * ID3D12Heap object. This handle holds a reference to the underlying - * object. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name - * is not NULL, then it must point to a NULL-terminated array of - * UTF-16 characters that refers to a ID3D12Heap object. - * - * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is - * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE, then exactly one - * of ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle and - * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name must not be - * NULL. If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle - * is not NULL, then it must represent a valid shared NT handle that - * is returned by ID3D12Device::CreateSharedHandle when referring to a - * ID3D12Resource object. This handle holds a reference to the - * underlying object. If - * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name - * is not NULL, then it must point to a NULL-terminated array of - * UTF-16 characters that refers to a ID3D12Resource object. - * - * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is - * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE, then - * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle must - * represent a valid shared NT handle that is returned by - * IDXGIResource1::CreateSharedHandle when referring to a - * ID3D11Resource object. If - * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name - * is not NULL, then it must point to a NULL-terminated array of - * UTF-16 characters that refers to a ID3D11Resource object. - * - * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is - * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT, then - * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::handle must - * represent a valid shared KMT handle that is returned by - * IDXGIResource::GetSharedHandle when referring to a - * ID3D11Resource object and - * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::win32::name - * must be NULL. - * - * If ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type is - * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF, then - * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::handle::nvSciBufObject must be non-NULL - * and reference a valid NvSciBuf object. 
- * If the NvSciBuf object imported into CUDA is also mapped by other drivers, then the - * application must use ::cuWaitExternalSemaphoresAsync or ::cuSignalExternalSemaphoresAsync - * as appropriate barriers to maintain coherence between CUDA and the other drivers. - * See ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC and ::CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC - * for memory synchronization. - * - * - * The size of the memory object must be specified in - * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::size. - * - * Specifying the flag ::CUDA_EXTERNAL_MEMORY_DEDICATED in - * ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::flags indicates that the - * resource is a dedicated resource. The definition of what a - * dedicated resource is outside the scope of this extension. - * This flag must be set if ::CUDA_EXTERNAL_MEMORY_HANDLE_DESC::type - * is one of the following: - * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE - * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE - * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT - * - * \param extMem_out - Returned handle to an external memory object - * \param memHandleDesc - Memory import handle descriptor - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE - * \notefnerr - * - * \note If the Vulkan memory imported into CUDA is mapped on the CPU then the - * application must use vkInvalidateMappedMemoryRanges/vkFlushMappedMemoryRanges - * as well as appropriate Vulkan pipeline barriers to maintain coherence between - * CPU and GPU. For more information on these APIs, please refer to "Synchronization - * and Cache Control" chapter from Vulkan specification. - * - * \sa ::cuDestroyExternalMemory, - * ::cuExternalMemoryGetMappedBuffer, - * ::cuExternalMemoryGetMappedMipmappedArray - */ -CUresult CUDAAPI cuImportExternalMemory(CUexternalMemory *extMem_out, const CUDA_EXTERNAL_MEMORY_HANDLE_DESC *memHandleDesc); - -/** - * \brief Maps a buffer onto an imported memory object - * - * Maps a buffer onto an imported memory object and returns a device - * pointer in \p devPtr. - * - * The properties of the buffer being mapped must be described in - * \p bufferDesc. The ::CUDA_EXTERNAL_MEMORY_BUFFER_DESC structure is - * defined as follows: - * - * \code - typedef struct CUDA_EXTERNAL_MEMORY_BUFFER_DESC_st { - unsigned long long offset; - unsigned long long size; - unsigned int flags; - } CUDA_EXTERNAL_MEMORY_BUFFER_DESC; - * \endcode - * - * where ::CUDA_EXTERNAL_MEMORY_BUFFER_DESC::offset is the offset in - * the memory object where the buffer's base address is. - * ::CUDA_EXTERNAL_MEMORY_BUFFER_DESC::size is the size of the buffer. - * ::CUDA_EXTERNAL_MEMORY_BUFFER_DESC::flags must be zero. - * - * The offset and size have to be suitably aligned to match the - * requirements of the external API. Mapping two buffers whose ranges - * overlap may or may not result in the same virtual address being - * returned for the overlapped portion. In such cases, the application - * must ensure that all accesses to that region from the GPU are - * volatile. Otherwise writes made via one address are not guaranteed - * to be visible via the other address, even if they're issued by the - * same thread. It is recommended that applications map the combined - * range instead of mapping separate buffers and then apply the - * appropriate offsets to the returned pointer to derive the - * individual buffers. - * - * The returned pointer \p devPtr must be freed using ::cuMemFree. 
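A minimal sketch of the import-and-map flow described above, using the OPAQUE_FD handle type. The names `fd` and `sizeInBytes` are assumed to come from the exporting API, and error checking is omitted.

    #include <cuda.h>

    /* Import an FD-backed allocation and map it as a device buffer (sketch). */
    static CUdeviceptr map_external_fd(int fd, unsigned long long sizeInBytes,
                                       CUexternalMemory *extMemOut)
    {
        CUDA_EXTERNAL_MEMORY_HANDLE_DESC memDesc = {0};
        CUDA_EXTERNAL_MEMORY_BUFFER_DESC bufDesc = {0};
        CUdeviceptr devPtr = 0;

        memDesc.type = CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD;
        memDesc.handle.fd = fd;      /* ownership of fd passes to the CUDA driver */
        memDesc.size = sizeInBytes;
        cuImportExternalMemory(extMemOut, &memDesc);

        bufDesc.offset = 0;
        bufDesc.size = sizeInBytes;
        bufDesc.flags = 0;           /* must be zero */
        cuExternalMemoryGetMappedBuffer(&devPtr, *extMemOut, &bufDesc);
        return devPtr;               /* free with cuMemFree, then cuDestroyExternalMemory */
    }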
- * - * \param devPtr - Returned device pointer to buffer - * \param extMem - Handle to external memory object - * \param bufferDesc - Buffer descriptor - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE - * \notefnerr - * - * \sa ::cuImportExternalMemory, - * ::cuDestroyExternalMemory, - * ::cuExternalMemoryGetMappedMipmappedArray - */ -CUresult CUDAAPI cuExternalMemoryGetMappedBuffer(CUdeviceptr *devPtr, CUexternalMemory extMem, const CUDA_EXTERNAL_MEMORY_BUFFER_DESC *bufferDesc); - -/** - * \brief Maps a CUDA mipmapped array onto an external memory object - * - * Maps a CUDA mipmapped array onto an external object and returns a - * handle to it in \p mipmap. - * - * The properties of the CUDA mipmapped array being mapped must be - * described in \p mipmapDesc. The structure - * ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC is defined as follows: - * - * \code - typedef struct CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC_st { - unsigned long long offset; - CUDA_ARRAY3D_DESCRIPTOR arrayDesc; - unsigned int numLevels; - } CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC; - * \endcode - * - * where ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::offset is the - * offset in the memory object where the base level of the mipmap - * chain is. - * ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::arrayDesc describes - * the format, dimensions and type of the base level of the mipmap - * chain. For further details on these parameters, please refer to the - * documentation for ::cuMipmappedArrayCreate. Note that if the mipmapped - * array is bound as a color target in the graphics API, then the flag - * ::CUDA_ARRAY3D_COLOR_ATTACHMENT must be specified in - * ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::arrayDesc::Flags. - * ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::numLevels specifies - * the total number of levels in the mipmap chain. - * - * If \p extMem was imported from a handle of type ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF, then - * ::CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC::numLevels must be equal to 1. - * - * The returned CUDA mipmapped array must be freed using ::cuMipmappedArrayDestroy. - * - * \param mipmap - Returned CUDA mipmapped array - * \param extMem - Handle to external memory object - * \param mipmapDesc - CUDA array descriptor - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE - * \notefnerr - * - * \sa ::cuImportExternalMemory, - * ::cuDestroyExternalMemory, - * ::cuExternalMemoryGetMappedBuffer - */ -CUresult CUDAAPI cuExternalMemoryGetMappedMipmappedArray(CUmipmappedArray *mipmap, CUexternalMemory extMem, const CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC *mipmapDesc); - -/** - * \brief Destroys an external memory object. - * - * Destroys the specified external memory object. Any existing buffers - * and CUDA mipmapped arrays mapped onto this object must no longer be - * used and must be explicitly freed using ::cuMemFree and - * ::cuMipmappedArrayDestroy respectively. 
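For texture-like resources, the same imported object can instead be mapped as a CUDA mipmapped array. The sketch below assumes a 2D RGBA8 image of `width` x `height` exported by the other API and a single mip level; error checking is omitted.

    #include <cuda.h>

    /* Map an imported memory object as a single-level 2D mipmapped array (sketch). */
    static CUmipmappedArray map_external_image(CUexternalMemory extMem,
                                               unsigned int width, unsigned int height)
    {
        CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC mmDesc = {0};
        CUmipmappedArray mipmap = 0;

        mmDesc.offset = 0;
        mmDesc.arrayDesc.Width = width;
        mmDesc.arrayDesc.Height = height;
        mmDesc.arrayDesc.Depth = 0;
        mmDesc.arrayDesc.Format = CU_AD_FORMAT_UNSIGNED_INT8;
        mmDesc.arrayDesc.NumChannels = 4;
        mmDesc.numLevels = 1;        /* must be 1 for NvSciBuf imports */
        cuExternalMemoryGetMappedMipmappedArray(&mipmap, extMem, &mmDesc);
        return mipmap;               /* free with cuMipmappedArrayDestroy before cuDestroyExternalMemory */
    }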
- * - * \param extMem - External memory object to be destroyed - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_HANDLE - * \notefnerr - * - * \sa ::cuImportExternalMemory, - * ::cuExternalMemoryGetMappedBuffer, - * ::cuExternalMemoryGetMappedMipmappedArray - */ -CUresult CUDAAPI cuDestroyExternalMemory(CUexternalMemory extMem); - -/** - * \brief Imports an external semaphore - * - * Imports an externally allocated synchronization object and returns - * a handle to that in \p extSem_out. - * - * The properties of the handle being imported must be described in - * \p semHandleDesc. The ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC is - * defined as follows: - * - * \code - typedef struct CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC_st { - CUexternalSemaphoreHandleType type; - union { - int fd; - struct { - void *handle; - const void *name; - } win32; - const void* NvSciSyncObj; - } handle; - unsigned int flags; - } CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC; - * \endcode - * - * where ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type specifies the type of - * handle being imported. ::CUexternalSemaphoreHandleType is defined - * as: - * - * \code - typedef enum CUexternalSemaphoreHandleType_enum { - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD = 1, - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32 = 2, - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT = 3, - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE = 4, - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE = 5, - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC = 6, - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX = 7, - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT = 8, - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD = 9, - CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 = 10 - } CUexternalSemaphoreHandleType; - * \endcode - * - * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD, then - * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::fd must be a valid - * file descriptor referencing a synchronization object. Ownership of - * the file descriptor is transferred to the CUDA driver when the - * handle is imported successfully. Performing any operations on the - * file descriptor after it is imported results in undefined behavior. - * - * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32, then exactly one - * of ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle and - * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must not be - * NULL. If - * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle - * is not NULL, then it must represent a valid shared NT handle that - * references a synchronization object. Ownership of this handle is - * not transferred to CUDA after the import operation, so the - * application must release the handle using the appropriate system - * call. If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name - * is not NULL, then it must name a valid synchronization object. - * - * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT, then - * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle must - * be non-NULL and - * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name - * must be NULL. The handle specified must be a globally shared KMT - * handle. 
This handle does not hold a reference to the underlying - * object, and thus will be invalid when all references to the - * synchronization object are destroyed. - * - * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE, then exactly one - * of ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle and - * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must not be - * NULL. If - * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle - * is not NULL, then it must represent a valid shared NT handle that - * is returned by ID3D12Device::CreateSharedHandle when referring to a - * ID3D12Fence object. This handle holds a reference to the underlying - * object. If - * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name - * is not NULL, then it must name a valid synchronization object that - * refers to a valid ID3D12Fence object. - * - * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE, then - * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle - * represents a valid shared NT handle that is returned by - * ID3D11Fence::CreateSharedHandle. If - * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name - * is not NULL, then it must name a valid synchronization object that - * refers to a valid ID3D11Fence object. - * - * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, then - * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::nvSciSyncObj - * represents a valid NvSciSyncObj. - * - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX, then - * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle - * represents a valid shared NT handle that - * is returned by IDXGIResource1::CreateSharedHandle when referring to - * a IDXGIKeyedMutex object. If - * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name - * is not NULL, then it must name a valid synchronization object that - * refers to a valid IDXGIKeyedMutex object. - * - * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT, then - * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle - * represents a valid shared KMT handle that - * is returned by IDXGIResource::GetSharedHandle when referring to - * a IDXGIKeyedMutex object and - * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must be NULL. - * - * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD, then - * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::fd must be a valid - * file descriptor referencing a synchronization object. Ownership of - * the file descriptor is transferred to the CUDA driver when the - * handle is imported successfully. Performing any operations on the - * file descriptor after it is imported results in undefined behavior. - * - * If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::type is - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32, then exactly one - * of ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle and - * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name must not be - * NULL. If - * ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::handle - * is not NULL, then it must represent a valid shared NT handle that - * references a synchronization object. 
Ownership of this handle is - * not transferred to CUDA after the import operation, so the - * application must release the handle using the appropriate system - * call. If ::CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC::handle::win32::name - * is not NULL, then it must name a valid synchronization object. - * - * \param extSem_out - Returned handle to an external semaphore - * \param semHandleDesc - Semaphore import handle descriptor - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_NOT_SUPPORTED, - * ::CUDA_ERROR_INVALID_HANDLE - * \notefnerr - * - * \sa ::cuDestroyExternalSemaphore, - * ::cuSignalExternalSemaphoresAsync, - * ::cuWaitExternalSemaphoresAsync - */ -CUresult CUDAAPI cuImportExternalSemaphore(CUexternalSemaphore *extSem_out, const CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC *semHandleDesc); - -/** - * \brief Signals a set of external semaphore objects - * - * Enqueues a signal operation on a set of externally allocated - * semaphore object in the specified stream. The operations will be - * executed when all prior operations in the stream complete. - * - * The exact semantics of signaling a semaphore depends on the type of - * the object. - * - * If the semaphore object is any one of the following types: - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD, - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32, - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT - * then signaling the semaphore will set it to the signaled state. - * - * If the semaphore object is any one of the following types: - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE, - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE, - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD, - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 - * then the semaphore will be set to the value specified in - * ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::fence::value. - * - * If the semaphore object is of the type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC - * this API sets ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::nvSciSync::fence - * to a value that can be used by subsequent waiters of the same NvSciSync object - * to order operations with those currently submitted in \p stream. Such an update - * will overwrite previous contents of - * ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::nvSciSync::fence. By default, - * signaling such an external semaphore object causes appropriate memory synchronization - * operations to be performed over all external memory objects that are imported as - * ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. This ensures that any subsequent accesses - * made by other importers of the same set of NvSciBuf memory object(s) are coherent. - * These operations can be skipped by specifying the flag - * ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC, which can be used as a - * performance optimization when data coherency is not required. But specifying this - * flag in scenarios where data coherency is required results in undefined behavior. - * Also, for semaphore object of the type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, - * if the NvSciSyncAttrList used to create the NvSciSyncObj had not set the flags in - * ::cuDeviceGetNvSciSyncAttributes to CUDA_NVSCISYNC_ATTR_SIGNAL, this API will return - * CUDA_ERROR_NOT_SUPPORTED. 
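For the fence-style handle types, enqueuing a signal reduces to filling in the target fence value. A minimal sketch, assuming `extSem` was previously imported with cuImportExternalSemaphore and `hStream` is a valid stream; error checking is omitted.

    #include <cuda.h>

    /* Enqueue a fence-style signal on one imported semaphore (sketch). */
    static void signal_fence(CUexternalSemaphore extSem, unsigned long long fenceValue,
                             CUstream hStream)
    {
        CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS sig = {0};
        sig.params.fence.value = fenceValue;   /* ignored for the opaque handle types */
        cuSignalExternalSemaphoresAsync(&extSem, &sig, 1, hStream);
    }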
- * - * If the semaphore object is any one of the following types: - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX, - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT - * then the keyed mutex will be released with the key specified in - * ::CUDA_EXTERNAL_SEMAPHORE_PARAMS::params::keyedmutex::key. - * - * \param extSemArray - Set of external semaphores to be signaled - * \param paramsArray - Array of semaphore parameters - * \param numExtSems - Number of semaphores to signal - * \param stream - Stream to enqueue the signal operations in - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_NOT_SUPPORTED - * \notefnerr - * - * \sa ::cuImportExternalSemaphore, - * ::cuDestroyExternalSemaphore, - * ::cuWaitExternalSemaphoresAsync - */ -CUresult CUDAAPI cuSignalExternalSemaphoresAsync(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS *paramsArray, unsigned int numExtSems, CUstream stream); - -/** - * \brief Waits on a set of external semaphore objects - * - * Enqueues a wait operation on a set of externally allocated - * semaphore object in the specified stream. The operations will be - * executed when all prior operations in the stream complete. - * - * The exact semantics of waiting on a semaphore depends on the type - * of the object. - * - * If the semaphore object is any one of the following types: - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD, - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32, - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT - * then waiting on the semaphore will wait until the semaphore reaches - * the signaled state. The semaphore will then be reset to the - * unsignaled state. Therefore for every signal operation, there can - * only be one wait operation. - * - * If the semaphore object is any one of the following types: - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE, - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE, - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_FD, - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_TIMELINE_SEMAPHORE_WIN32 - * then waiting on the semaphore will wait until the value of the - * semaphore is greater than or equal to - * ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS::params::fence::value. - * - * If the semaphore object is of the type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC - * then, waiting on the semaphore will wait until the - * ::CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS::params::nvSciSync::fence is signaled by the - * signaler of the NvSciSyncObj that was associated with this semaphore object. - * By default, waiting on such an external semaphore object causes appropriate - * memory synchronization operations to be performed over all external memory objects - * that are imported as ::CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF. This ensures that - * any subsequent accesses made by other importers of the same set of NvSciBuf memory - * object(s) are coherent. These operations can be skipped by specifying the flag - * ::CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC, which can be used as a - * performance optimization when data coherency is not required. But specifying this - * flag in scenarios where data coherency is required results in undefined behavior. 
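The matching wait side is symmetric; a sketch under the same assumptions as the signal example above.

    #include <cuda.h>

    /* Enqueue a fence-style wait on one imported semaphore (sketch). */
    static void wait_fence(CUexternalSemaphore extSem, unsigned long long fenceValue,
                           CUstream hStream)
    {
        CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS wait = {0};
        wait.params.fence.value = fenceValue;  /* wait until the fence reaches this value */
        cuWaitExternalSemaphoresAsync(&extSem, &wait, 1, hStream);
    }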
- * Also, for semaphore object of the type ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC, - * if the NvSciSyncAttrList used to create the NvSciSyncObj had not set the flags in - * ::cuDeviceGetNvSciSyncAttributes to CUDA_NVSCISYNC_ATTR_WAIT, this API will return - * CUDA_ERROR_NOT_SUPPORTED. - * - * If the semaphore object is any one of the following types: - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX, - * ::CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT - * then the keyed mutex will be acquired when it is released with the key - * specified in ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS::params::keyedmutex::key - * or until the timeout specified by - * ::CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS::params::keyedmutex::timeoutMs - * has lapsed. The timeout interval can either be a finite value - * specified in milliseconds or an infinite value. In case an infinite - * value is specified the timeout never elapses. The windows INFINITE - * macro must be used to specify infinite timeout. - * - * \param extSemArray - External semaphores to be waited on - * \param paramsArray - Array of semaphore parameters - * \param numExtSems - Number of semaphores to wait on - * \param stream - Stream to enqueue the wait operations in - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_NOT_SUPPORTED, - * ::CUDA_ERROR_TIMEOUT - * \notefnerr - * - * \sa ::cuImportExternalSemaphore, - * ::cuDestroyExternalSemaphore, - * ::cuSignalExternalSemaphoresAsync - */ -CUresult CUDAAPI cuWaitExternalSemaphoresAsync(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS *paramsArray, unsigned int numExtSems, CUstream stream); - -/** - * \brief Destroys an external semaphore - * - * Destroys an external semaphore object and releases any references - * to the underlying resource. Any outstanding signals or waits must - * have completed before the semaphore is destroyed. - * - * \param extSem - External semaphore to be destroyed - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_HANDLE - * \notefnerr - * - * \sa ::cuImportExternalSemaphore, - * ::cuSignalExternalSemaphoresAsync, - * ::cuWaitExternalSemaphoresAsync - */ -CUresult CUDAAPI cuDestroyExternalSemaphore(CUexternalSemaphore extSem); - -/** @} */ /* END CUDA_EXTRES_INTEROP */ - -/** - * \defgroup CUDA_MEMOP Stream memory operations - * - * ___MANBRIEF___ Stream memory operations of the low-level CUDA driver API - * (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the stream memory operations of the low-level CUDA - * driver application programming interface. - * - * The whole set of operations is disabled by default. Users are required - * to explicitly enable them, e.g. on Linux by passing the kernel module - * parameter shown below: - * modprobe nvidia NVreg_EnableStreamMemOPs=1 - * There is currently no way to enable these operations on other operating - * systems. - * - * Users can programmatically query whether the device supports these - * operations with ::cuDeviceGetAttribute() and - * ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS. - * - * Support for the ::CU_STREAM_WAIT_VALUE_NOR flag can be queried with - * ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR. 
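Because the whole feature set is opt-in, a portable program should probe the device attributes named above before relying on these operations. A small sketch, assuming a valid `CUdevice dev`.

    #include <cuda.h>

    /* Probe stream memory-operation support on a device (sketch). */
    static int stream_mem_ops_supported(CUdevice dev)
    {
        int hasMemOps = 0, hasWaitNor = 0;
        cuDeviceGetAttribute(&hasMemOps, CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS, dev);
        cuDeviceGetAttribute(&hasWaitNor, CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR, dev);
        return hasMemOps;            /* hasWaitNor gates only the NOR wait condition */
    }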
- * - * Support for the ::cuStreamWriteValue64() and ::cuStreamWaitValue64() - * functions, as well as for the ::CU_STREAM_MEM_OP_WAIT_VALUE_64 and - * ::CU_STREAM_MEM_OP_WRITE_VALUE_64 flags, can be queried with - * ::CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS. - * - * Support for both ::CU_STREAM_WAIT_VALUE_FLUSH and - * ::CU_STREAM_MEM_OP_FLUSH_REMOTE_WRITES requires dedicated platform - * hardware features and can be queried with ::cuDeviceGetAttribute() and - * ::CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES. - * - * Note that all memory pointers passed as parameters to these operations - * are device pointers. Where necessary a device pointer should be - * obtained, for example with ::cuMemHostGetDevicePointer(). - * - * None of the operations accepts pointers to managed memory buffers - * (::cuMemAllocManaged). - * - * @{ - */ - -/** - * \brief Wait on a memory location - * - * Enqueues a synchronization of the stream on the given memory location. Work - * ordered after the operation will block until the given condition on the - * memory is satisfied. By default, the condition is to wait for - * (int32_t)(*addr - value) >= 0, a cyclic greater-or-equal. - * Other condition types can be specified via \p flags. - * - * If the memory was registered via ::cuMemHostRegister(), the device pointer - * should be obtained with ::cuMemHostGetDevicePointer(). This function cannot - * be used with managed memory (::cuMemAllocManaged). - * - * Support for this can be queried with ::cuDeviceGetAttribute() and - * ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS. - * - * Support for CU_STREAM_WAIT_VALUE_NOR can be queried with ::cuDeviceGetAttribute() and - * ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR. - * - * \param stream The stream to synchronize on the memory location. - * \param addr The memory location to wait on. - * \param value The value to compare with the memory location. - * \param flags See ::CUstreamWaitValue_flags. - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_NOT_SUPPORTED - * \notefnerr - * - * \sa ::cuStreamWaitValue64, - * ::cuStreamWriteValue32, - * ::cuStreamWriteValue64, - * ::cuStreamBatchMemOp, - * ::cuMemHostRegister, - * ::cuStreamWaitEvent - */ -CUresult CUDAAPI cuStreamWaitValue32(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags); - -/** - * \brief Wait on a memory location - * - * Enqueues a synchronization of the stream on the given memory location. Work - * ordered after the operation will block until the given condition on the - * memory is satisfied. By default, the condition is to wait for - * (int64_t)(*addr - value) >= 0, a cyclic greater-or-equal. - * Other condition types can be specified via \p flags. - * - * If the memory was registered via ::cuMemHostRegister(), the device pointer - * should be obtained with ::cuMemHostGetDevicePointer(). - * - * Support for this can be queried with ::cuDeviceGetAttribute() and - * ::CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS. - * - * \param stream The stream to synchronize on the memory location. - * \param addr The memory location to wait on. - * \param value The value to compare with the memory location. - * \param flags See ::CUstreamWaitValue_flags. 
- * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_NOT_SUPPORTED - * \notefnerr - * - * \sa ::cuStreamWaitValue32, - * ::cuStreamWriteValue32, - * ::cuStreamWriteValue64, - * ::cuStreamBatchMemOp, - * ::cuMemHostRegister, - * ::cuStreamWaitEvent - */ -CUresult CUDAAPI cuStreamWaitValue64(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags); - -/** - * \brief Write a value to memory - * - * Write a value to memory. Unless the ::CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER - * flag is passed, the write is preceded by a system-wide memory fence, - * equivalent to a __threadfence_system() but scoped to the stream - * rather than a CUDA thread. - * - * If the memory was registered via ::cuMemHostRegister(), the device pointer - * should be obtained with ::cuMemHostGetDevicePointer(). This function cannot - * be used with managed memory (::cuMemAllocManaged). - * - * Support for this can be queried with ::cuDeviceGetAttribute() and - * ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS. - * - * \param stream The stream to do the write in. - * \param addr The device address to write to. - * \param value The value to write. - * \param flags See ::CUstreamWriteValue_flags. - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_NOT_SUPPORTED - * \notefnerr - * - * \sa ::cuStreamWriteValue64, - * ::cuStreamWaitValue32, - * ::cuStreamWaitValue64, - * ::cuStreamBatchMemOp, - * ::cuMemHostRegister, - * ::cuEventRecord - */ -CUresult CUDAAPI cuStreamWriteValue32(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags); - -/** - * \brief Write a value to memory - * - * Write a value to memory. Unless the ::CU_STREAM_WRITE_VALUE_NO_MEMORY_BARRIER - * flag is passed, the write is preceded by a system-wide memory fence, - * equivalent to a __threadfence_system() but scoped to the stream - * rather than a CUDA thread. - * - * If the memory was registered via ::cuMemHostRegister(), the device pointer - * should be obtained with ::cuMemHostGetDevicePointer(). - * - * Support for this can be queried with ::cuDeviceGetAttribute() and - * ::CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS. - * - * \param stream The stream to do the write in. - * \param addr The device address to write to. - * \param value The value to write. - * \param flags See ::CUstreamWriteValue_flags. - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_NOT_SUPPORTED - * \notefnerr - * - * \sa ::cuStreamWriteValue32, - * ::cuStreamWaitValue32, - * ::cuStreamWaitValue64, - * ::cuStreamBatchMemOp, - * ::cuMemHostRegister, - * ::cuEventRecord - */ -CUresult CUDAAPI cuStreamWriteValue64(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags); - -/** - * \brief Batch operations to synchronize the stream via memory operations - * - * This is a batch version of ::cuStreamWaitValue32() and ::cuStreamWriteValue32(). - * Batching operations may avoid some performance overhead in both the API call - * and the device execution versus adding them to the stream in separate API - * calls. The operations are enqueued in the order they appear in the array. - * - * See ::CUstreamBatchMemOpType for the full set of supported operations, and - * ::cuStreamWaitValue32(), ::cuStreamWaitValue64(), ::cuStreamWriteValue32(), - * and ::cuStreamWriteValue64() for details of specific operations. - * - * Basic support for this can be queried with ::cuDeviceGetAttribute() and - * ::CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS. 
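A typical use of the 32-bit wait/write variants documented above is a lightweight cross-stream handshake on a host-registered flag. The sketch assumes `hostFlag` was registered with cuMemHostRegister and that both streams belong to the current context; error checking is omitted.

    #include <cuda.h>

    /* Make `consumer` wait until `producer` publishes the value 1 (sketch). */
    static void handshake(void *hostFlag, CUstream producer, CUstream consumer)
    {
        CUdeviceptr dFlag = 0;
        cuMemHostGetDevicePointer(&dFlag, hostFlag, 0);

        /* work ordered after this point in `consumer` blocks until *dFlag >= 1 */
        cuStreamWaitValue32(consumer, dFlag, 1, CU_STREAM_WAIT_VALUE_GEQ);

        /* ... enqueue producer work here ... */
        cuStreamWriteValue32(producer, dFlag, 1, CU_STREAM_WRITE_VALUE_DEFAULT);
    }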
See related APIs for details - * on querying support for specific operations. - * - * \param stream The stream to enqueue the operations in. - * \param count The number of operations in the array. Must be less than 256. - * \param paramArray The types and parameters of the individual operations. - * \param flags Reserved for future expansion; must be 0. - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_NOT_SUPPORTED - * \notefnerr - * - * \sa ::cuStreamWaitValue32, - * ::cuStreamWaitValue64, - * ::cuStreamWriteValue32, - * ::cuStreamWriteValue64, - * ::cuMemHostRegister - */ -CUresult CUDAAPI cuStreamBatchMemOp(CUstream stream, unsigned int count, CUstreamBatchMemOpParams *paramArray, unsigned int flags); - -/** @} */ /* END CUDA_MEMOP */ - -/** - * \defgroup CUDA_EXEC Execution Control - * - * ___MANBRIEF___ execution control functions of the low-level CUDA driver API - * (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the execution control functions of the low-level CUDA - * driver application programming interface. - * - * @{ - */ - -/** - * \brief Returns information about a function - * - * Returns in \p *pi the integer value of the attribute \p attrib on the kernel - * given by \p hfunc. The supported attributes are: - * - ::CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK: The maximum number of threads - * per block, beyond which a launch of the function would fail. This number - * depends on both the function and the device on which the function is - * currently loaded. - * - ::CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES: The size in bytes of - * statically-allocated shared memory per block required by this function. - * This does not include dynamically-allocated shared memory requested by - * the user at runtime. - * - ::CU_FUNC_ATTRIBUTE_CONST_SIZE_BYTES: The size in bytes of user-allocated - * constant memory required by this function. - * - ::CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES: The size in bytes of local memory - * used by each thread of this function. - * - ::CU_FUNC_ATTRIBUTE_NUM_REGS: The number of registers used by each thread - * of this function. - * - ::CU_FUNC_ATTRIBUTE_PTX_VERSION: The PTX virtual architecture version for - * which the function was compiled. This value is the major PTX version * 10 - * + the minor PTX version, so a PTX version 1.3 function would return the - * value 13. Note that this may return the undefined value of 0 for cubins - * compiled prior to CUDA 3.0. - * - ::CU_FUNC_ATTRIBUTE_BINARY_VERSION: The binary architecture version for - * which the function was compiled. This value is the major binary - * version * 10 + the minor binary version, so a binary version 1.3 function - * would return the value 13. Note that this will return a value of 10 for - * legacy cubins that do not have a properly-encoded binary architecture - * version. - * - ::CU_FUNC_CACHE_MODE_CA: The attribute to indicate whether the function has - * been compiled with user specified option "-Xptxas --dlcm=ca" set . - * - ::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: The maximum size in bytes of - * dynamically-allocated shared memory. - * - ::CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: Preferred shared memory-L1 - * cache split ratio in percent of total shared memory. 
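The attributes listed above are read back with one call per attribute. A sketch, assuming `hfunc` is a kernel handle obtained elsewhere (for example via cuModuleGetFunction).

    #include <cuda.h>

    /* Query a few commonly used kernel attributes (sketch). */
    static void query_kernel_limits(CUfunction hfunc)
    {
        int maxThreads = 0, numRegs = 0, staticSmem = 0;
        cuFuncGetAttribute(&maxThreads, CU_FUNC_ATTRIBUTE_MAX_THREADS_PER_BLOCK, hfunc);
        cuFuncGetAttribute(&numRegs, CU_FUNC_ATTRIBUTE_NUM_REGS, hfunc);
        cuFuncGetAttribute(&staticSmem, CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES, hfunc);
        /* maxThreads: per-block launch limit; numRegs: registers per thread;
           staticSmem: statically allocated shared memory in bytes */
    }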
- * - * \param pi - Returned attribute value - * \param attrib - Attribute requested - * \param hfunc - Function to query attribute of - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa ::cuCtxGetCacheConfig, - * ::cuCtxSetCacheConfig, - * ::cuFuncSetCacheConfig, - * ::cuLaunchKernel, - * ::cudaFuncGetAttributes, - * ::cudaFuncSetAttribute - */ -CUresult CUDAAPI cuFuncGetAttribute(int *pi, CUfunction_attribute attrib, CUfunction hfunc); - -/** - * \brief Sets information about a function - * - * This call sets the value of a specified attribute \p attrib on the kernel given - * by \p hfunc to an integer value specified by \p val - * This function returns CUDA_SUCCESS if the new value of the attribute could be - * successfully set. If the set fails, this call will return an error. - * Not all attributes can have values set. Attempting to set a value on a read-only - * attribute will result in an error (CUDA_ERROR_INVALID_VALUE) - * - * Supported attributes for the cuFuncSetAttribute call are: - * - ::CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES: This maximum size in bytes of - * dynamically-allocated shared memory. The value should contain the requested - * maximum size of dynamically-allocated shared memory. The sum of this value and - * the function attribute ::CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES cannot exceed the - * device attribute ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN. - * The maximal size of requestable dynamic shared memory may differ by GPU - * architecture. - * - ::CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT: On devices where the L1 - * cache and shared memory use the same hardware resources, this sets the shared memory - * carveout preference, in percent of the total shared memory. - * See ::CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR - * This is only a hint, and the driver can choose a different ratio if required to execute the function. - * - * \param hfunc - Function to query attribute of - * \param attrib - Attribute requested - * \param value - The value to set - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa ::cuCtxGetCacheConfig, - * ::cuCtxSetCacheConfig, - * ::cuFuncSetCacheConfig, - * ::cuLaunchKernel, - * ::cudaFuncGetAttributes, - * ::cudaFuncSetAttribute - */ -CUresult CUDAAPI cuFuncSetAttribute(CUfunction hfunc, CUfunction_attribute attrib, int value); - -/** - * \brief Sets the preferred cache configuration for a device function - * - * On devices where the L1 cache and shared memory use the same hardware - * resources, this sets through \p config the preferred cache configuration for - * the device function \p hfunc. This is only a preference. The driver will use - * the requested configuration if possible, but it is free to choose a different - * configuration if required to execute \p hfunc. Any context-wide preference - * set via ::cuCtxSetCacheConfig() will be overridden by this per-function - * setting unless the per-function setting is ::CU_FUNC_CACHE_PREFER_NONE. In - * that case, the current context-wide setting will be used. - * - * This setting does nothing on devices where the size of the L1 cache and - * shared memory are fixed. 
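Writable attributes and the per-function cache preference are set in the same style. The sketch opts a kernel into a larger dynamic shared-memory allocation; the 96 KiB figure is illustrative and must not exceed the device's CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN limit.

    #include <cuda.h>

    /* Request more dynamic shared memory and prefer shared memory over L1 (sketch). */
    static void configure_kernel(CUfunction hfunc)
    {
        cuFuncSetAttribute(hfunc, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, 96 * 1024);
        cuFuncSetCacheConfig(hfunc, CU_FUNC_CACHE_PREFER_SHARED);
    }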
- * - * Launching a kernel with a different preference than the most recent - * preference setting may insert a device-side synchronization point. - * - * - * The supported cache configurations are: - * - ::CU_FUNC_CACHE_PREFER_NONE: no preference for shared memory or L1 (default) - * - ::CU_FUNC_CACHE_PREFER_SHARED: prefer larger shared memory and smaller L1 cache - * - ::CU_FUNC_CACHE_PREFER_L1: prefer larger L1 cache and smaller shared memory - * - ::CU_FUNC_CACHE_PREFER_EQUAL: prefer equal sized L1 cache and shared memory - * - * \param hfunc - Kernel to configure cache for - * \param config - Requested cache configuration - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT - * \notefnerr - * - * \sa ::cuCtxGetCacheConfig, - * ::cuCtxSetCacheConfig, - * ::cuFuncGetAttribute, - * ::cuLaunchKernel, - * ::cudaFuncSetCacheConfig - */ -CUresult CUDAAPI cuFuncSetCacheConfig(CUfunction hfunc, CUfunc_cache config); - -/** - * \brief Sets the shared memory configuration for a device function. - * - * On devices with configurable shared memory banks, this function will - * force all subsequent launches of the specified device function to have - * the given shared memory bank size configuration. On any given launch of the - * function, the shared memory configuration of the device will be temporarily - * changed if needed to suit the function's preferred configuration. Changes in - * shared memory configuration between subsequent launches of functions, - * may introduce a device side synchronization point. - * - * Any per-function setting of shared memory bank size set via - * ::cuFuncSetSharedMemConfig will override the context wide setting set with - * ::cuCtxSetSharedMemConfig. - * - * Changing the shared memory bank size will not increase shared memory usage - * or affect occupancy of kernels, but may have major effects on performance. - * Larger bank sizes will allow for greater potential bandwidth to shared memory, - * but will change what kinds of accesses to shared memory will result in bank - * conflicts. - * - * This function will do nothing on devices with fixed shared memory bank size. - * - * The supported bank configurations are: - * - ::CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE: use the context's shared memory - * configuration when launching this function. - * - ::CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE: set shared memory bank width to - * be natively four bytes when launching this function. - * - ::CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE: set shared memory bank width to - * be natively eight bytes when launching this function. - * - * \param hfunc - kernel to be given a shared memory config - * \param config - requested shared memory configuration - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT - * \notefnerr - * - * \sa ::cuCtxGetCacheConfig, - * ::cuCtxSetCacheConfig, - * ::cuCtxGetSharedMemConfig, - * ::cuCtxSetSharedMemConfig, - * ::cuFuncGetAttribute, - * ::cuLaunchKernel, - * ::cudaFuncSetSharedMemConfig - */ -CUresult CUDAAPI cuFuncSetSharedMemConfig(CUfunction hfunc, CUsharedconfig config); - -/** - * \brief Returns a module handle - * - * Returns in \p *hmod the handle of the module that function \p hfunc - * is located in. 
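Where eight-byte shared-memory banks help (for example, double-precision tiles), the per-function bank-size override described above is a single call; a sketch with the same hypothetical `hfunc` as before.

    #include <cuda.h>

    /* Prefer 8-byte shared-memory banks for this kernel's launches (sketch). */
    static void prefer_wide_banks(CUfunction hfunc)
    {
        cuFuncSetSharedMemConfig(hfunc, CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE);
    }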
The lifetime of the module corresponds to the lifetime of - * the context it was loaded in or until the module is explicitly unloaded. - * - * The CUDA runtime manages its own modules loaded into the primary context. - * If the handle returned by this API refers to a module loaded by the CUDA runtime, - * calling ::cuModuleUnload() on that module will result in undefined behavior. - * - * \param hmod - Returned module handle - * \param hfunc - Function to retrieve module for - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_NOT_FOUND - * \notefnerr - * - */ -CUresult CUDAAPI cuFuncGetModule(CUmodule *hmod, CUfunction hfunc); - -/** - * \brief Launches a CUDA function - * - * Invokes the kernel \p f on a \p gridDimX x \p gridDimY x \p gridDimZ - * grid of blocks. Each block contains \p blockDimX x \p blockDimY x - * \p blockDimZ threads. - * - * \p sharedMemBytes sets the amount of dynamic shared memory that will be - * available to each thread block. - * - * Kernel parameters to \p f can be specified in one of two ways: - * - * 1) Kernel parameters can be specified via \p kernelParams. If \p f - * has N parameters, then \p kernelParams needs to be an array of N - * pointers. Each of \p kernelParams[0] through \p kernelParams[N-1] - * must point to a region of memory from which the actual kernel - * parameter will be copied. The number of kernel parameters and their - * offsets and sizes do not need to be specified as that information is - * retrieved directly from the kernel's image. - * - * 2) Kernel parameters can also be packaged by the application into - * a single buffer that is passed in via the \p extra parameter. - * This places the burden on the application of knowing each kernel - * parameter's size and alignment/padding within the buffer. Here is - * an example of using the \p extra parameter in this manner: - * \code - size_t argBufferSize; - char argBuffer[256]; - - // populate argBuffer and argBufferSize - - void *config[] = { - CU_LAUNCH_PARAM_BUFFER_POINTER, argBuffer, - CU_LAUNCH_PARAM_BUFFER_SIZE, &argBufferSize, - CU_LAUNCH_PARAM_END - }; - status = cuLaunchKernel(f, gx, gy, gz, bx, by, bz, sh, s, NULL, config); - * \endcode - * - * The \p extra parameter exists to allow ::cuLaunchKernel to take - * additional less commonly used arguments. \p extra specifies a list of - * names of extra settings and their corresponding values. Each extra - * setting name is immediately followed by the corresponding value. The - * list must be terminated with either NULL or ::CU_LAUNCH_PARAM_END. - * - * - ::CU_LAUNCH_PARAM_END, which indicates the end of the \p extra - * array; - * - ::CU_LAUNCH_PARAM_BUFFER_POINTER, which specifies that the next - * value in \p extra will be a pointer to a buffer containing all - * the kernel parameters for launching kernel \p f; - * - ::CU_LAUNCH_PARAM_BUFFER_SIZE, which specifies that the next - * value in \p extra will be a pointer to a size_t containing the - * size of the buffer specified with ::CU_LAUNCH_PARAM_BUFFER_POINTER; - * - * The error ::CUDA_ERROR_INVALID_VALUE will be returned if kernel - * parameters are specified with both \p kernelParams and \p extra - * (i.e. both \p kernelParams and \p extra are non-NULL). 
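The \p extra path is shown in the snippet above; the more common \p kernelParams path is sketched below for a hypothetical kernel taking `(float *in, float *out, int n)`. The handles `f`, `dIn`, `dOut` and `hStream` are assumed to exist, and error checking is omitted.

    #include <cuda.h>

    /* Launch a kernel using kernelParams: one pointer per kernel argument (sketch). */
    static CUresult launch_vector_kernel(CUfunction f, CUdeviceptr dIn, CUdeviceptr dOut,
                                         int n, CUstream hStream)
    {
        void *params[] = { &dIn, &dOut, &n };
        int block = 256;
        unsigned int grid = (unsigned int)((n + block - 1) / block);
        return cuLaunchKernel(f,
                              grid, 1, 1,            /* grid dimensions in blocks      */
                              (unsigned int)block, 1, 1, /* block dimensions in threads */
                              0,                     /* dynamic shared memory in bytes */
                              hStream,
                              params,                /* kernelParams                   */
                              NULL);                 /* extra                          */
    }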
- * - * Calling ::cuLaunchKernel() invalidates the persistent function state - * set through the following deprecated APIs: - * ::cuFuncSetBlockShape(), - * ::cuFuncSetSharedSize(), - * ::cuParamSetSize(), - * ::cuParamSeti(), - * ::cuParamSetf(), - * ::cuParamSetv(). - * - * Note that to use ::cuLaunchKernel(), the kernel \p f must either have - * been compiled with toolchain version 3.2 or later so that it will - * contain kernel parameter information, or have no kernel parameters. - * If either of these conditions is not met, then ::cuLaunchKernel() will - * return ::CUDA_ERROR_INVALID_IMAGE. - * - * \param f - Kernel to launch - * \param gridDimX - Width of grid in blocks - * \param gridDimY - Height of grid in blocks - * \param gridDimZ - Depth of grid in blocks - * \param blockDimX - X dimension of each thread block - * \param blockDimY - Y dimension of each thread block - * \param blockDimZ - Z dimension of each thread block - * \param sharedMemBytes - Dynamic shared-memory size per thread block in bytes - * \param hStream - Stream identifier - * \param kernelParams - Array of pointers to kernel parameters - * \param extra - Extra options - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_INVALID_IMAGE, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_LAUNCH_FAILED, - * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, - * ::CUDA_ERROR_LAUNCH_TIMEOUT, - * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, - * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED - * \note_null_stream - * \notefnerr - * - * \sa ::cuCtxGetCacheConfig, - * ::cuCtxSetCacheConfig, - * ::cuFuncSetCacheConfig, - * ::cuFuncGetAttribute, - * ::cudaLaunchKernel - */ -CUresult CUDAAPI cuLaunchKernel(CUfunction f, - unsigned int gridDimX, - unsigned int gridDimY, - unsigned int gridDimZ, - unsigned int blockDimX, - unsigned int blockDimY, - unsigned int blockDimZ, - unsigned int sharedMemBytes, - CUstream hStream, - void **kernelParams, - void **extra); - -/** - * \brief Launches a CUDA function where thread blocks can cooperate and synchronize as they execute - * - * Invokes the kernel \p f on a \p gridDimX x \p gridDimY x \p gridDimZ - * grid of blocks. Each block contains \p blockDimX x \p blockDimY x - * \p blockDimZ threads. - * - * \p sharedMemBytes sets the amount of dynamic shared memory that will be - * available to each thread block. - * - * The device on which this kernel is invoked must have a non-zero value for - * the device attribute ::CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH. - * - * The total number of blocks launched cannot exceed the maximum number of blocks per - * multiprocessor as returned by ::cuOccupancyMaxActiveBlocksPerMultiprocessor (or - * ::cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors - * as specified by the device attribute ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT. - * - * The kernel cannot make use of CUDA dynamic parallelism. - * - * Kernel parameters must be specified via \p kernelParams. If \p f - * has N parameters, then \p kernelParams needs to be an array of N - * pointers. Each of \p kernelParams[0] through \p kernelParams[N-1] - * must point to a region of memory from which the actual kernel - * parameter will be copied. The number of kernel parameters and their - * offsets and sizes do not need to be specified as that information is - * retrieved directly from the kernel's image. 
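Because a cooperative launch must fit on the device all at once, the grid is usually sized from the occupancy query mentioned above. A sketch with the same hypothetical kernel and handles as in the previous example, plus a valid `CUdevice dev`.

    #include <cuda.h>

    /* Size a cooperative grid from occupancy and launch it (sketch). */
    static CUresult launch_cooperative(CUfunction f, CUdevice dev,
                                       CUdeviceptr dIn, CUdeviceptr dOut, int n,
                                       CUstream hStream)
    {
        int numSms = 0, blocksPerSm = 0, blockSize = 256;
        void *params[] = { &dIn, &dOut, &n };

        cuDeviceGetAttribute(&numSms, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, dev);
        cuOccupancyMaxActiveBlocksPerMultiprocessor(&blocksPerSm, f, blockSize, 0);

        return cuLaunchCooperativeKernel(f,
                                         (unsigned int)(numSms * blocksPerSm), 1, 1,
                                         (unsigned int)blockSize, 1, 1,
                                         0, hStream, params);
    }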
- * - * Calling ::cuLaunchCooperativeKernel() sets persistent function state that is - * the same as function state set through ::cuLaunchKernel API - * - * When the kernel \p f is launched via ::cuLaunchCooperativeKernel(), the previous - * block shape, shared size and parameter info associated with \p f - * is overwritten. - * - * Note that to use ::cuLaunchCooperativeKernel(), the kernel \p f must either have - * been compiled with toolchain version 3.2 or later so that it will - * contain kernel parameter information, or have no kernel parameters. - * If either of these conditions is not met, then ::cuLaunchCooperativeKernel() will - * return ::CUDA_ERROR_INVALID_IMAGE. - * - * \param f - Kernel to launch - * \param gridDimX - Width of grid in blocks - * \param gridDimY - Height of grid in blocks - * \param gridDimZ - Depth of grid in blocks - * \param blockDimX - X dimension of each thread block - * \param blockDimY - Y dimension of each thread block - * \param blockDimZ - Z dimension of each thread block - * \param sharedMemBytes - Dynamic shared-memory size per thread block in bytes - * \param hStream - Stream identifier - * \param kernelParams - Array of pointers to kernel parameters - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_INVALID_IMAGE, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_LAUNCH_FAILED, - * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, - * ::CUDA_ERROR_LAUNCH_TIMEOUT, - * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, - * ::CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE, - * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED - * \note_null_stream - * \notefnerr - * - * \sa ::cuCtxGetCacheConfig, - * ::cuCtxSetCacheConfig, - * ::cuFuncSetCacheConfig, - * ::cuFuncGetAttribute, - * ::cuLaunchCooperativeKernelMultiDevice, - * ::cudaLaunchCooperativeKernel - */ -CUresult CUDAAPI cuLaunchCooperativeKernel(CUfunction f, - unsigned int gridDimX, - unsigned int gridDimY, - unsigned int gridDimZ, - unsigned int blockDimX, - unsigned int blockDimY, - unsigned int blockDimZ, - unsigned int sharedMemBytes, - CUstream hStream, - void **kernelParams); - -/** - * \brief Launches CUDA functions on multiple devices where thread blocks can cooperate and synchronize as they execute - * - * \deprecated This function is deprecated as of CUDA 11.3. - * - * Invokes kernels as specified in the \p launchParamsList array where each element - * of the array specifies all the parameters required to perform a single kernel launch. - * These kernels can cooperate and synchronize as they execute. The size of the array is - * specified by \p numDevices. - * - * No two kernels can be launched on the same device. All the devices targeted by this - * multi-device launch must be identical. All devices must have a non-zero value for the - * device attribute ::CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH. - * - * All kernels launched must be identical with respect to the compiled code. Note that - * any __device__, __constant__ or __managed__ variables present in the module that owns - * the kernel launched on each device, are independently instantiated on every device. - * It is the application's responsibility to ensure these variables are initialized and - * used appropriately. - * - * The size of the grids as specified in blocks, the size of the blocks themselves - * and the amount of shared memory used by each thread block must also match across - * all launched kernels. 
- * - * The streams used to launch these kernels must have been created via either ::cuStreamCreate - * or ::cuStreamCreateWithPriority. The NULL stream or ::CU_STREAM_LEGACY or ::CU_STREAM_PER_THREAD - * cannot be used. - * - * The total number of blocks launched per kernel cannot exceed the maximum number of blocks - * per multiprocessor as returned by ::cuOccupancyMaxActiveBlocksPerMultiprocessor (or - * ::cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags) times the number of multiprocessors - * as specified by the device attribute ::CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT. Since the - * total number of blocks launched per device has to match across all devices, the maximum - * number of blocks that can be launched per device will be limited by the device with the - * least number of multiprocessors. - * - * The kernels cannot make use of CUDA dynamic parallelism. - * - * The ::CUDA_LAUNCH_PARAMS structure is defined as: - * \code - typedef struct CUDA_LAUNCH_PARAMS_st - { - CUfunction function; - unsigned int gridDimX; - unsigned int gridDimY; - unsigned int gridDimZ; - unsigned int blockDimX; - unsigned int blockDimY; - unsigned int blockDimZ; - unsigned int sharedMemBytes; - CUstream hStream; - void **kernelParams; - } CUDA_LAUNCH_PARAMS; - * \endcode - * where: - * - ::CUDA_LAUNCH_PARAMS::function specifies the kernel to be launched. All functions must - * be identical with respect to the compiled code. - * - ::CUDA_LAUNCH_PARAMS::gridDimX is the width of the grid in blocks. This must match across - * all kernels launched. - * - ::CUDA_LAUNCH_PARAMS::gridDimY is the height of the grid in blocks. This must match across - * all kernels launched. - * - ::CUDA_LAUNCH_PARAMS::gridDimZ is the depth of the grid in blocks. This must match across - * all kernels launched. - * - ::CUDA_LAUNCH_PARAMS::blockDimX is the X dimension of each thread block. This must match across - * all kernels launched. - * - ::CUDA_LAUNCH_PARAMS::blockDimX is the Y dimension of each thread block. This must match across - * all kernels launched. - * - ::CUDA_LAUNCH_PARAMS::blockDimZ is the Z dimension of each thread block. This must match across - * all kernels launched. - * - ::CUDA_LAUNCH_PARAMS::sharedMemBytes is the dynamic shared-memory size per thread block in bytes. - * This must match across all kernels launched. - * - ::CUDA_LAUNCH_PARAMS::hStream is the handle to the stream to perform the launch in. This cannot - * be the NULL stream or ::CU_STREAM_LEGACY or ::CU_STREAM_PER_THREAD. The CUDA context associated - * with this stream must match that associated with ::CUDA_LAUNCH_PARAMS::function. - * - ::CUDA_LAUNCH_PARAMS::kernelParams is an array of pointers to kernel parameters. If - * ::CUDA_LAUNCH_PARAMS::function has N parameters, then ::CUDA_LAUNCH_PARAMS::kernelParams - * needs to be an array of N pointers. Each of ::CUDA_LAUNCH_PARAMS::kernelParams[0] through - * ::CUDA_LAUNCH_PARAMS::kernelParams[N-1] must point to a region of memory from which the actual - * kernel parameter will be copied. The number of kernel parameters and their offsets and sizes - * do not need to be specified as that information is retrieved directly from the kernel's image. - * - * By default, the kernel won't begin execution on any GPU until all prior work in all the specified - * streams has completed. This behavior can be overridden by specifying the flag - * ::CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC. 
When this flag is specified, each kernel - * will only wait for prior work in the stream corresponding to that GPU to complete before it begins - * execution. - * - * Similarly, by default, any subsequent work pushed in any of the specified streams will not begin - * execution until the kernels on all GPUs have completed. This behavior can be overridden by specifying - * the flag ::CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC. When this flag is specified, - * any subsequent work pushed in any of the specified streams will only wait for the kernel launched - * on the GPU corresponding to that stream to complete before it begins execution. - * - * Calling ::cuLaunchCooperativeKernelMultiDevice() sets persistent function state that is - * the same as function state set through ::cuLaunchKernel API when called individually for each - * element in \p launchParamsList. - * - * When kernels are launched via ::cuLaunchCooperativeKernelMultiDevice(), the previous - * block shape, shared size and parameter info associated with each ::CUDA_LAUNCH_PARAMS::function - * in \p launchParamsList is overwritten. - * - * Note that to use ::cuLaunchCooperativeKernelMultiDevice(), the kernels must either have - * been compiled with toolchain version 3.2 or later so that it will - * contain kernel parameter information, or have no kernel parameters. - * If either of these conditions is not met, then ::cuLaunchCooperativeKernelMultiDevice() will - * return ::CUDA_ERROR_INVALID_IMAGE. - * - * \param launchParamsList - List of launch parameters, one per device - * \param numDevices - Size of the \p launchParamsList array - * \param flags - Flags to control launch behavior - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_INVALID_IMAGE, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_LAUNCH_FAILED, - * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, - * ::CUDA_ERROR_LAUNCH_TIMEOUT, - * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, - * ::CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE, - * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED - * \note_null_stream - * \notefnerr - * - * \sa ::cuCtxGetCacheConfig, - * ::cuCtxSetCacheConfig, - * ::cuFuncSetCacheConfig, - * ::cuFuncGetAttribute, - * ::cuLaunchCooperativeKernel, - * ::cudaLaunchCooperativeKernelMultiDevice - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuLaunchCooperativeKernelMultiDevice(CUDA_LAUNCH_PARAMS *launchParamsList, unsigned int numDevices, unsigned int flags); - -/** - * \brief Enqueues a host function call in a stream - * - * Enqueues a host function to run in a stream. The function will be called - * after currently enqueued work and will block work added after it. - * - * The host function must not make any CUDA API calls. Attempting to use a - * CUDA API may result in ::CUDA_ERROR_NOT_PERMITTED, but this is not required. - * The host function must not perform any synchronization that may depend on - * outstanding CUDA work not mandated to run earlier. Host functions without a - * mandated order (such as in independent streams) execute in undefined order - * and may be serialized. - * - * For the purposes of Unified Memory, execution makes a number of guarantees: - *
- *   - The stream is considered idle for the duration of the function's
- *     execution. Thus, for example, the function may always use memory attached
- *     to the stream it was enqueued in.
- *   - The start of execution of the function has the same effect as
- *     synchronizing an event recorded in the same stream immediately prior to
- *     the function. It thus synchronizes streams which have been "joined"
- *     prior to the function.
- *   - Adding device work to any stream does not have the effect of making
- *     the stream active until all preceding host functions and stream callbacks
- *     have executed. Thus, for example, a function might use global attached
- *     memory even if work has been added to another stream, if the work has
- *     been ordered behind the function call with an event.
- *   - Completion of the function does not cause a stream to become
- *     active except as described above. The stream will remain idle if no
- *     device work follows the function, and will remain idle across
- *     consecutive host functions or stream callbacks without device work in
- *     between. Thus, for example, stream synchronization can be done by
- *     signaling from a host function at the end of the stream.
- * - * Note that, in contrast to ::cuStreamAddCallback, the function will not be - * called in the event of an error in the CUDA context. - * - * \param hStream - Stream to enqueue function call in - * \param fn - The function to call once preceding stream operations are complete - * \param userData - User-specified data to be passed to the function - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_NOT_SUPPORTED - * \note_null_stream - * \notefnerr - * - * \sa ::cuStreamCreate, - * ::cuStreamQuery, - * ::cuStreamSynchronize, - * ::cuStreamWaitEvent, - * ::cuStreamDestroy, - * ::cuMemAllocManaged, - * ::cuStreamAttachMemAsync, - * ::cuStreamAddCallback - */ -CUresult CUDAAPI cuLaunchHostFunc(CUstream hStream, CUhostFn fn, void *userData); - -/** @} */ /* END CUDA_EXEC */ - -/** - * \defgroup CUDA_EXEC_DEPRECATED Execution Control [DEPRECATED] - * - * ___MANBRIEF___ deprecated execution control functions of the low-level CUDA - * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the deprecated execution control functions of the - * low-level CUDA driver application programming interface. - * - * @{ - */ - -/** - * \brief Sets the block-dimensions for the function - * - * \deprecated - * - * Specifies the \p x, \p y, and \p z dimensions of the thread blocks that are - * created when the kernel given by \p hfunc is launched. - * - * \param hfunc - Kernel to specify dimensions of - * \param x - X dimension - * \param y - Y dimension - * \param z - Z dimension - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa ::cuFuncSetSharedSize, - * ::cuFuncSetCacheConfig, - * ::cuFuncGetAttribute, - * ::cuParamSetSize, - * ::cuParamSeti, - * ::cuParamSetf, - * ::cuParamSetv, - * ::cuLaunch, - * ::cuLaunchGrid, - * ::cuLaunchGridAsync, - * ::cuLaunchKernel - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuFuncSetBlockShape(CUfunction hfunc, int x, int y, int z); - -/** - * \brief Sets the dynamic shared-memory size for the function - * - * \deprecated - * - * Sets through \p bytes the amount of dynamic shared memory that will be - * available to each thread block when the kernel given by \p hfunc is launched. - * - * \param hfunc - Kernel to specify dynamic shared-memory size for - * \param bytes - Dynamic shared-memory size per thread in bytes - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa ::cuFuncSetBlockShape, - * ::cuFuncSetCacheConfig, - * ::cuFuncGetAttribute, - * ::cuParamSetSize, - * ::cuParamSeti, - * ::cuParamSetf, - * ::cuParamSetv, - * ::cuLaunch, - * ::cuLaunchGrid, - * ::cuLaunchGridAsync, - * ::cuLaunchKernel - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuFuncSetSharedSize(CUfunction hfunc, unsigned int bytes); - -/** - * \brief Sets the parameter size for the function - * - * \deprecated - * - * Sets through \p numbytes the total size in bytes needed by the function - * parameters of the kernel corresponding to \p hfunc. 
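As a short aside on the host-function enqueue documented earlier in this section, the sketch below queues a notification callback behind work already submitted to a stream. The callback name and payload are hypothetical, and the callback deliberately makes no CUDA API calls, per the restrictions above.

    #include <cuda.h>
    #include <stdio.h>

    static int batch_id = 42;                 /* must outlive the callback */

    /* Host callback: runs after prior work in the stream completes. */
    static void CUDA_CB on_batch_done(void *userData)
    {
        printf("batch %d finished\n", *(int *)userData);
    }

    /* Sketch: enqueue the callback behind whatever is already in `stream`. */
    CUresult enqueue_completion_notice(CUstream stream)
    {
        return cuLaunchHostFunc(stream, on_batch_done, &batch_id);
    }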
- * - * \param hfunc - Kernel to set parameter size for - * \param numbytes - Size of parameter list in bytes - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa ::cuFuncSetBlockShape, - * ::cuFuncSetSharedSize, - * ::cuFuncGetAttribute, - * ::cuParamSetf, - * ::cuParamSeti, - * ::cuParamSetv, - * ::cuLaunch, - * ::cuLaunchGrid, - * ::cuLaunchGridAsync, - * ::cuLaunchKernel - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuParamSetSize(CUfunction hfunc, unsigned int numbytes); - -/** - * \brief Adds an integer parameter to the function's argument list - * - * \deprecated - * - * Sets an integer parameter that will be specified the next time the - * kernel corresponding to \p hfunc will be invoked. \p offset is a byte offset. - * - * \param hfunc - Kernel to add parameter to - * \param offset - Offset to add parameter to argument list - * \param value - Value of parameter - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa ::cuFuncSetBlockShape, - * ::cuFuncSetSharedSize, - * ::cuFuncGetAttribute, - * ::cuParamSetSize, - * ::cuParamSetf, - * ::cuParamSetv, - * ::cuLaunch, - * ::cuLaunchGrid, - * ::cuLaunchGridAsync, - * ::cuLaunchKernel - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuParamSeti(CUfunction hfunc, int offset, unsigned int value); - -/** - * \brief Adds a floating-point parameter to the function's argument list - * - * \deprecated - * - * Sets a floating-point parameter that will be specified the next time the - * kernel corresponding to \p hfunc will be invoked. \p offset is a byte offset. - * - * \param hfunc - Kernel to add parameter to - * \param offset - Offset to add parameter to argument list - * \param value - Value of parameter - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa ::cuFuncSetBlockShape, - * ::cuFuncSetSharedSize, - * ::cuFuncGetAttribute, - * ::cuParamSetSize, - * ::cuParamSeti, - * ::cuParamSetv, - * ::cuLaunch, - * ::cuLaunchGrid, - * ::cuLaunchGridAsync, - * ::cuLaunchKernel - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuParamSetf(CUfunction hfunc, int offset, float value); - -/** - * \brief Adds arbitrary data to the function's argument list - * - * \deprecated - * - * Copies an arbitrary amount of data (specified in \p numbytes) from \p ptr - * into the parameter space of the kernel corresponding to \p hfunc. \p offset - * is a byte offset. 
- * - * \param hfunc - Kernel to add data to - * \param offset - Offset to add data to argument list - * \param ptr - Pointer to arbitrary data - * \param numbytes - Size of data to copy in bytes - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa ::cuFuncSetBlockShape, - * ::cuFuncSetSharedSize, - * ::cuFuncGetAttribute, - * ::cuParamSetSize, - * ::cuParamSetf, - * ::cuParamSeti, - * ::cuLaunch, - * ::cuLaunchGrid, - * ::cuLaunchGridAsync, - * ::cuLaunchKernel - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuParamSetv(CUfunction hfunc, int offset, void *ptr, unsigned int numbytes); - -/** - * \brief Launches a CUDA function - * - * \deprecated - * - * Invokes the kernel \p f on a 1 x 1 x 1 grid of blocks. The block - * contains the number of threads specified by a previous call to - * ::cuFuncSetBlockShape(). - * - * The block shape, dynamic shared memory size, and parameter information - * must be set using - * ::cuFuncSetBlockShape(), - * ::cuFuncSetSharedSize(), - * ::cuParamSetSize(), - * ::cuParamSeti(), - * ::cuParamSetf(), and - * ::cuParamSetv() - * prior to calling this function. - * - * Launching a function via ::cuLaunchKernel() invalidates the function's - * block shape, dynamic shared memory size, and parameter information. After - * launching via cuLaunchKernel, this state must be re-initialized prior to - * calling this function. Failure to do so results in undefined behavior. - * - * \param f - Kernel to launch - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_LAUNCH_FAILED, - * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, - * ::CUDA_ERROR_LAUNCH_TIMEOUT, - * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, - * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED - * \notefnerr - * - * \sa ::cuFuncSetBlockShape, - * ::cuFuncSetSharedSize, - * ::cuFuncGetAttribute, - * ::cuParamSetSize, - * ::cuParamSetf, - * ::cuParamSeti, - * ::cuParamSetv, - * ::cuLaunchGrid, - * ::cuLaunchGridAsync, - * ::cuLaunchKernel - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuLaunch(CUfunction f); - -/** - * \brief Launches a CUDA function - * - * \deprecated - * - * Invokes the kernel \p f on a \p grid_width x \p grid_height grid of - * blocks. Each block contains the number of threads specified by a previous - * call to ::cuFuncSetBlockShape(). - * - * The block shape, dynamic shared memory size, and parameter information - * must be set using - * ::cuFuncSetBlockShape(), - * ::cuFuncSetSharedSize(), - * ::cuParamSetSize(), - * ::cuParamSeti(), - * ::cuParamSetf(), and - * ::cuParamSetv() - * prior to calling this function. - * - * Launching a function via ::cuLaunchKernel() invalidates the function's - * block shape, dynamic shared memory size, and parameter information. After - * launching via cuLaunchKernel, this state must be re-initialized prior to - * calling this function. Failure to do so results in undefined behavior. 
- * - * \param f - Kernel to launch - * \param grid_width - Width of grid in blocks - * \param grid_height - Height of grid in blocks - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_LAUNCH_FAILED, - * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, - * ::CUDA_ERROR_LAUNCH_TIMEOUT, - * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, - * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED - * \notefnerr - * - * \sa ::cuFuncSetBlockShape, - * ::cuFuncSetSharedSize, - * ::cuFuncGetAttribute, - * ::cuParamSetSize, - * ::cuParamSetf, - * ::cuParamSeti, - * ::cuParamSetv, - * ::cuLaunch, - * ::cuLaunchGridAsync, - * ::cuLaunchKernel - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuLaunchGrid(CUfunction f, int grid_width, int grid_height); - -/** - * \brief Launches a CUDA function - * - * \deprecated - * - * Invokes the kernel \p f on a \p grid_width x \p grid_height grid of - * blocks. Each block contains the number of threads specified by a previous - * call to ::cuFuncSetBlockShape(). - * - * The block shape, dynamic shared memory size, and parameter information - * must be set using - * ::cuFuncSetBlockShape(), - * ::cuFuncSetSharedSize(), - * ::cuParamSetSize(), - * ::cuParamSeti(), - * ::cuParamSetf(), and - * ::cuParamSetv() - * prior to calling this function. - * - * Launching a function via ::cuLaunchKernel() invalidates the function's - * block shape, dynamic shared memory size, and parameter information. After - * launching via cuLaunchKernel, this state must be re-initialized prior to - * calling this function. Failure to do so results in undefined behavior. - * - * \param f - Kernel to launch - * \param grid_width - Width of grid in blocks - * \param grid_height - Height of grid in blocks - * \param hStream - Stream identifier - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_LAUNCH_FAILED, - * ::CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES, - * ::CUDA_ERROR_LAUNCH_TIMEOUT, - * ::CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING, - * ::CUDA_ERROR_SHARED_OBJECT_INIT_FAILED - * - * \note In certain cases where cubins are created with no ABI (i.e., using \p ptxas \p --abi-compile \p no), - * this function may serialize kernel launches. The CUDA driver retains asynchronous behavior by - * growing the per-thread stack as needed per launch and not shrinking it afterwards. - * - * \note_null_stream - * \notefnerr - * - * \sa ::cuFuncSetBlockShape, - * ::cuFuncSetSharedSize, - * ::cuFuncGetAttribute, - * ::cuParamSetSize, - * ::cuParamSetf, - * ::cuParamSeti, - * ::cuParamSetv, - * ::cuLaunch, - * ::cuLaunchGrid, - * ::cuLaunchKernel - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuLaunchGridAsync(CUfunction f, int grid_width, int grid_height, CUstream hStream); - - -/** - * \brief Adds a texture-reference to the function's argument list - * - * \deprecated - * - * Makes the CUDA array or linear memory bound to the texture reference - * \p hTexRef available to a device program as a texture. In this version of - * CUDA, the texture-reference must be obtained via ::cuModuleGetTexRef() and - * the \p texunit parameter must be set to ::CU_PARAM_TR_DEFAULT. 
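The deprecated launch path above requires the caller to manage parameter offsets and alignment by hand. A compact sketch of that sequence, assuming a kernel that takes a device pointer followed by a 32-bit integer (names and sizes are placeholders):

    #include <cuda.h>

    /* Sketch of the deprecated set-shape / set-params / launch sequence. */
    CUresult legacy_launch(CUfunction f, CUdeviceptr buf, int n)
    {
        int offset = 0;
        CUresult err;

        if ((err = cuFuncSetBlockShape(f, 256, 1, 1)) != CUDA_SUCCESS) return err;
        if ((err = cuFuncSetSharedSize(f, 0)) != CUDA_SUCCESS) return err;

        /* first argument: device pointer (naturally aligned at offset 0) */
        if ((err = cuParamSetv(f, offset, &buf, (unsigned int)sizeof(buf))) != CUDA_SUCCESS)
            return err;
        offset += (int)sizeof(buf);

        /* second argument: 32-bit integer */
        if ((err = cuParamSeti(f, offset, (unsigned int)n)) != CUDA_SUCCESS) return err;
        offset += (int)sizeof(unsigned int);

        if ((err = cuParamSetSize(f, (unsigned int)offset)) != CUDA_SUCCESS) return err;

        return cuLaunchGrid(f, /* grid_width */ 64, /* grid_height */ 1);
    }

::cuLaunchKernel supersedes this sequence by taking the grid/block shape and the parameter array in a single call.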
- * - * \param hfunc - Kernel to add texture-reference to - * \param texunit - Texture unit (must be ::CU_PARAM_TR_DEFAULT) - * \param hTexRef - Texture-reference to add to argument list - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuParamSetTexRef(CUfunction hfunc, int texunit, CUtexref hTexRef); -/** @} */ /* END CUDA_EXEC_DEPRECATED */ - -/** - * \defgroup CUDA_GRAPH Graph Management - * - * ___MANBRIEF___ graph management functions of the low-level CUDA driver API - * (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the graph management functions of the low-level CUDA - * driver application programming interface. - * - * @{ - */ - -/** - * \brief Creates a graph - * - * Creates an empty graph, which is returned via \p phGraph. - * - * \param phGraph - Returns newly created graph - * \param flags - Graph creation flags, must be 0 - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_OUT_OF_MEMORY - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphAddChildGraphNode, - * ::cuGraphAddEmptyNode, - * ::cuGraphAddKernelNode, - * ::cuGraphAddHostNode, - * ::cuGraphAddMemcpyNode, - * ::cuGraphAddMemsetNode, - * ::cuGraphInstantiate, - * ::cuGraphDestroy, - * ::cuGraphGetNodes, - * ::cuGraphGetRootNodes, - * ::cuGraphGetEdges, - * ::cuGraphClone - */ -CUresult CUDAAPI cuGraphCreate(CUgraph *phGraph, unsigned int flags); - -/** - * \brief Creates a kernel execution node and adds it to a graph - * - * Creates a new kernel execution node and adds it to \p hGraph with \p numDependencies - * dependencies specified via \p dependencies and arguments specified in \p nodeParams. - * It is possible for \p numDependencies to be 0, in which case the node will be placed - * at the root of the graph. \p dependencies may not have any duplicate entries. - * A handle to the new node will be returned in \p phGraphNode. - * - * The CUDA_KERNEL_NODE_PARAMS structure is defined as: - * - * \code - * typedef struct CUDA_KERNEL_NODE_PARAMS_st { - * CUfunction func; - * unsigned int gridDimX; - * unsigned int gridDimY; - * unsigned int gridDimZ; - * unsigned int blockDimX; - * unsigned int blockDimY; - * unsigned int blockDimZ; - * unsigned int sharedMemBytes; - * void **kernelParams; - * void **extra; - * } CUDA_KERNEL_NODE_PARAMS; - * \endcode - * - * When the graph is launched, the node will invoke kernel \p func on a (\p gridDimX x - * \p gridDimY x \p gridDimZ) grid of blocks. Each block contains - * (\p blockDimX x \p blockDimY x \p blockDimZ) threads. - * - * \p sharedMemBytes sets the amount of dynamic shared memory that will be - * available to each thread block. - * - * Kernel parameters to \p func can be specified in one of two ways: - * - * 1) Kernel parameters can be specified via \p kernelParams. If the kernel has N - * parameters, then \p kernelParams needs to be an array of N pointers. Each pointer, - * from \p kernelParams[0] to \p kernelParams[N-1], points to the region of memory from which the actual - * parameter will be copied. The number of kernel parameters and their offsets and sizes do not need - * to be specified as that information is retrieved directly from the kernel's image. 
- * - * 2) Kernel parameters for non-cooperative kernels can also be packaged by the application into a single - * buffer that is passed in via \p extra. This places the burden on the application of knowing each - * kernel parameter's size and alignment/padding within the buffer. The \p extra parameter exists - * to allow this function to take additional less commonly used arguments. \p extra specifies - * a list of names of extra settings and their corresponding values. Each extra setting name is - * immediately followed by the corresponding value. The list must be terminated with either NULL or - * CU_LAUNCH_PARAM_END. - * - * - ::CU_LAUNCH_PARAM_END, which indicates the end of the \p extra - * array; - * - ::CU_LAUNCH_PARAM_BUFFER_POINTER, which specifies that the next - * value in \p extra will be a pointer to a buffer - * containing all the kernel parameters for launching kernel - * \p func; - * - ::CU_LAUNCH_PARAM_BUFFER_SIZE, which specifies that the next - * value in \p extra will be a pointer to a size_t - * containing the size of the buffer specified with - * ::CU_LAUNCH_PARAM_BUFFER_POINTER; - * - * The error ::CUDA_ERROR_INVALID_VALUE will be returned if kernel parameters are specified with both - * \p kernelParams and \p extra (i.e. both \p kernelParams and \p extra are non-NULL). - * ::CUDA_ERROR_INVALID_VALUE will be returned if \p extra is used for a cooperative kernel. - * - * The \p kernelParams or \p extra array, as well as the argument values it points to, - * are copied during this call. - * - * \note Kernels launched using graphs must not use texture and surface references. Reading or - * writing through any texture or surface reference is undefined behavior. - * This restriction does not apply to texture and surface objects. - * - * \param phGraphNode - Returns newly created node - * \param hGraph - Graph to which to add the node - * \param dependencies - Dependencies of the node - * \param numDependencies - Number of dependencies - * \param nodeParams - Parameters for the GPU execution node - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuLaunchKernel, - * ::cuLaunchCooperativeKernel, - * ::cuGraphKernelNodeGetParams, - * ::cuGraphKernelNodeSetParams, - * ::cuGraphCreate, - * ::cuGraphDestroyNode, - * ::cuGraphAddChildGraphNode, - * ::cuGraphAddEmptyNode, - * ::cuGraphAddHostNode, - * ::cuGraphAddMemcpyNode, - * ::cuGraphAddMemsetNode - */ -CUresult CUDAAPI cuGraphAddKernelNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_KERNEL_NODE_PARAMS *nodeParams); - -/** - * \brief Returns a kernel node's parameters - * - * Returns the parameters of kernel node \p hNode in \p nodeParams. - * The \p kernelParams or \p extra array returned in \p nodeParams, - * as well as the argument values it points to, are owned by the node. - * This memory remains valid until the node is destroyed or its - * parameters are modified, and should not be modified - * directly. Use ::cuGraphKernelNodeSetParams to update the - * parameters of this node. - * - * The params will contain either \p kernelParams or \p extra, - * according to which of these was most recently set on the node. 
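Putting the structure above to use, the following sketch builds a one-node graph containing a kernel node; the function handle, arguments, and launch shape are placeholders. Per the documentation above, the kernelParams array and the values it points to are copied during the call, so they may live on the stack.

    #include <cuda.h>
    #include <string.h>

    /* Sketch: create a graph whose single root node launches `f`. */
    CUresult build_kernel_graph(CUfunction f, CUdeviceptr buf, int n, CUgraph *graphOut)
    {
        CUgraph graph;
        CUgraphNode kernelNode;
        CUDA_KERNEL_NODE_PARAMS params;
        void *kernelParams[] = { &buf, &n };
        CUresult err;

        if ((err = cuGraphCreate(&graph, 0)) != CUDA_SUCCESS) return err;

        memset(&params, 0, sizeof(params));
        params.func = f;
        params.gridDimX  = 64;  params.gridDimY  = 1; params.gridDimZ  = 1;
        params.blockDimX = 256; params.blockDimY = 1; params.blockDimZ = 1;
        params.sharedMemBytes = 0;
        params.kernelParams   = kernelParams;   /* `extra` stays NULL */

        /* no dependencies: the node becomes a root of the graph */
        err = cuGraphAddKernelNode(&kernelNode, graph, NULL, 0, &params);
        if (err != CUDA_SUCCESS) { cuGraphDestroy(graph); return err; }

        *graphOut = graph;
        return CUDA_SUCCESS;
    }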
- * - * \param hNode - Node to get the parameters for - * \param nodeParams - Pointer to return the parameters - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuLaunchKernel, - * ::cuGraphAddKernelNode, - * ::cuGraphKernelNodeSetParams - */ -CUresult CUDAAPI cuGraphKernelNodeGetParams(CUgraphNode hNode, CUDA_KERNEL_NODE_PARAMS *nodeParams); - -/** - * \brief Sets a kernel node's parameters - * - * Sets the parameters of kernel node \p hNode to \p nodeParams. - * - * \param hNode - Node to set the parameters for - * \param nodeParams - Parameters to copy - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_OUT_OF_MEMORY - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuLaunchKernel, - * ::cuGraphAddKernelNode, - * ::cuGraphKernelNodeGetParams - */ -CUresult CUDAAPI cuGraphKernelNodeSetParams(CUgraphNode hNode, const CUDA_KERNEL_NODE_PARAMS *nodeParams); - -/** - * \brief Creates a memcpy node and adds it to a graph - * - * Creates a new memcpy node and adds it to \p hGraph with \p numDependencies - * dependencies specified via \p dependencies. - * It is possible for \p numDependencies to be 0, in which case the node will be placed - * at the root of the graph. \p dependencies may not have any duplicate entries. - * A handle to the new node will be returned in \p phGraphNode. - * - * When the graph is launched, the node will perform the memcpy described by \p copyParams. - * See ::cuMemcpy3D() for a description of the structure and its restrictions. - * - * Memcpy nodes have some additional restrictions with regards to managed memory, if the - * system contains at least one device which has a zero value for the device attribute - * ::CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS. If one or more of the operands refer - * to managed memory, then using the memory type ::CU_MEMORYTYPE_UNIFIED is disallowed - * for those operand(s). The managed memory will be treated as residing on either the - * host or the device, depending on which memory type is specified. - * - * \param phGraphNode - Returns newly created node - * \param hGraph - Graph to which to add the node - * \param dependencies - Dependencies of the node - * \param numDependencies - Number of dependencies - * \param copyParams - Parameters for the memory copy - * \param ctx - Context on which to run the node - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuMemcpy3D, - * ::cuGraphMemcpyNodeGetParams, - * ::cuGraphMemcpyNodeSetParams, - * ::cuGraphCreate, - * ::cuGraphDestroyNode, - * ::cuGraphAddChildGraphNode, - * ::cuGraphAddEmptyNode, - * ::cuGraphAddKernelNode, - * ::cuGraphAddHostNode, - * ::cuGraphAddMemsetNode - */ -CUresult CUDAAPI cuGraphAddMemcpyNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_MEMCPY3D *copyParams, CUcontext ctx); - -/** - * \brief Returns a memcpy node's parameters - * - * Returns the parameters of memcpy node \p hNode in \p nodeParams. 
- * - * \param hNode - Node to get the parameters for - * \param nodeParams - Pointer to return the parameters - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuMemcpy3D, - * ::cuGraphAddMemcpyNode, - * ::cuGraphMemcpyNodeSetParams - */ -CUresult CUDAAPI cuGraphMemcpyNodeGetParams(CUgraphNode hNode, CUDA_MEMCPY3D *nodeParams); - -/** - * \brief Sets a memcpy node's parameters - * - * Sets the parameters of memcpy node \p hNode to \p nodeParams. - * - * \param hNode - Node to set the parameters for - * \param nodeParams - Parameters to copy - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE, - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuMemcpy3D, - * ::cuGraphAddMemcpyNode, - * ::cuGraphMemcpyNodeGetParams - */ -CUresult CUDAAPI cuGraphMemcpyNodeSetParams(CUgraphNode hNode, const CUDA_MEMCPY3D *nodeParams); - -/** - * \brief Creates a memset node and adds it to a graph - * - * Creates a new memset node and adds it to \p hGraph with \p numDependencies - * dependencies specified via \p dependencies. - * It is possible for \p numDependencies to be 0, in which case the node will be placed - * at the root of the graph. \p dependencies may not have any duplicate entries. - * A handle to the new node will be returned in \p phGraphNode. - * - * The element size must be 1, 2, or 4 bytes. - * When the graph is launched, the node will perform the memset described by \p memsetParams. - * - * \param phGraphNode - Returns newly created node - * \param hGraph - Graph to which to add the node - * \param dependencies - Dependencies of the node - * \param numDependencies - Number of dependencies - * \param memsetParams - Parameters for the memory set - * \param ctx - Context on which to run the node - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_CONTEXT - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuMemsetD2D32, - * ::cuGraphMemsetNodeGetParams, - * ::cuGraphMemsetNodeSetParams, - * ::cuGraphCreate, - * ::cuGraphDestroyNode, - * ::cuGraphAddChildGraphNode, - * ::cuGraphAddEmptyNode, - * ::cuGraphAddKernelNode, - * ::cuGraphAddHostNode, - * ::cuGraphAddMemcpyNode - */ -CUresult CUDAAPI cuGraphAddMemsetNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_MEMSET_NODE_PARAMS *memsetParams, CUcontext ctx); - -/** - * \brief Returns a memset node's parameters - * - * Returns the parameters of memset node \p hNode in \p nodeParams. - * - * \param hNode - Node to get the parameters for - * \param nodeParams - Pointer to return the parameters - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuMemsetD2D32, - * ::cuGraphAddMemsetNode, - * ::cuGraphMemsetNodeSetParams - */ -CUresult CUDAAPI cuGraphMemsetNodeGetParams(CUgraphNode hNode, CUDA_MEMSET_NODE_PARAMS *nodeParams); - -/** - * \brief Sets a memset node's parameters - * - * Sets the parameters of memset node \p hNode to \p nodeParams. 
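A small sketch of a memset node that zeroes a buffer of 32-bit words follows. The CUDA_MEMSET_NODE_PARAMS field names used here (dst, pitch, value, elementSize, width, height) are assumed from the CUDA 11.x headers, and the helper name is hypothetical.

    #include <cuda.h>
    #include <string.h>

    /* Sketch: add a root memset node that zeroes `n` 32-bit words at `dst`.
     * `ctx` is the context the node should run in, as required by
     * ::cuGraphAddMemsetNode. */
    CUresult add_zero_fill_node(CUgraph graph, CUcontext ctx,
                                CUdeviceptr dst, size_t n, CUgraphNode *nodeOut)
    {
        CUDA_MEMSET_NODE_PARAMS p;
        memset(&p, 0, sizeof(p));
        p.dst         = dst;
        p.value       = 0;
        p.elementSize = 4;   /* element size must be 1, 2, or 4 bytes */
        p.width       = n;   /* elements per row */
        p.height      = 1;   /* single row; pitch is not used in this case */
        p.pitch       = 0;

        return cuGraphAddMemsetNode(nodeOut, graph, NULL, 0, &p, ctx);
    }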
- * - * \param hNode - Node to set the parameters for - * \param nodeParams - Parameters to copy - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuMemsetD2D32, - * ::cuGraphAddMemsetNode, - * ::cuGraphMemsetNodeGetParams - */ -CUresult CUDAAPI cuGraphMemsetNodeSetParams(CUgraphNode hNode, const CUDA_MEMSET_NODE_PARAMS *nodeParams); - -/** - * \brief Creates a host execution node and adds it to a graph - * - * Creates a new CPU execution node and adds it to \p hGraph with \p numDependencies - * dependencies specified via \p dependencies and arguments specified in \p nodeParams. - * It is possible for \p numDependencies to be 0, in which case the node will be placed - * at the root of the graph. \p dependencies may not have any duplicate entries. - * A handle to the new node will be returned in \p phGraphNode. - * - * When the graph is launched, the node will invoke the specified CPU function. - * Host nodes are not supported under MPS with pre-Volta GPUs. - * - * \param phGraphNode - Returns newly created node - * \param hGraph - Graph to which to add the node - * \param dependencies - Dependencies of the node - * \param numDependencies - Number of dependencies - * \param nodeParams - Parameters for the host node - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_NOT_SUPPORTED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuLaunchHostFunc, - * ::cuGraphHostNodeGetParams, - * ::cuGraphHostNodeSetParams, - * ::cuGraphCreate, - * ::cuGraphDestroyNode, - * ::cuGraphAddChildGraphNode, - * ::cuGraphAddEmptyNode, - * ::cuGraphAddKernelNode, - * ::cuGraphAddMemcpyNode, - * ::cuGraphAddMemsetNode - */ -CUresult CUDAAPI cuGraphAddHostNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_HOST_NODE_PARAMS *nodeParams); - -/** - * \brief Returns a host node's parameters - * - * Returns the parameters of host node \p hNode in \p nodeParams. - * - * \param hNode - Node to get the parameters for - * \param nodeParams - Pointer to return the parameters - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuLaunchHostFunc, - * ::cuGraphAddHostNode, - * ::cuGraphHostNodeSetParams - */ -CUresult CUDAAPI cuGraphHostNodeGetParams(CUgraphNode hNode, CUDA_HOST_NODE_PARAMS *nodeParams); - -/** - * \brief Sets a host node's parameters - * - * Sets the parameters of host node \p hNode to \p nodeParams. - * - * \param hNode - Node to set the parameters for - * \param nodeParams - Parameters to copy - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuLaunchHostFunc, - * ::cuGraphAddHostNode, - * ::cuGraphHostNodeGetParams - */ -CUresult CUDAAPI cuGraphHostNodeSetParams(CUgraphNode hNode, const CUDA_HOST_NODE_PARAMS *nodeParams); - -/** - * \brief Creates a child graph node and adds it to a graph - * - * Creates a new node which executes an embedded graph, and adds it to \p hGraph with - * \p numDependencies dependencies specified via \p dependencies. 
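For the host node added above, a minimal sketch follows; CUDA_HOST_NODE_PARAMS is assumed to carry just the callback (fn) and its userData, and the callback obeys the same restrictions as ::cuLaunchHostFunc.

    #include <cuda.h>
    #include <stdio.h>

    /* Host callback used by the host node; must not call the CUDA API. */
    static void CUDA_CB notify(void *userData)
    {
        printf("graph section done: %s\n", (const char *)userData);
    }

    /* Sketch: append a host node that runs after `dep`. */
    CUresult add_notify_node(CUgraph graph, CUgraphNode dep, CUgraphNode *nodeOut)
    {
        CUDA_HOST_NODE_PARAMS p;
        p.fn       = notify;
        p.userData = (void *)"stage-1";
        return cuGraphAddHostNode(nodeOut, graph, &dep, 1, &p);
    }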
- * It is possible for \p numDependencies to be 0, in which case the node will be placed - * at the root of the graph. \p dependencies may not have any duplicate entries. - * A handle to the new node will be returned in \p phGraphNode. - * - * If \p hGraph contains allocation or free nodes, this call will return an error. - * - * The node executes an embedded child graph. The child graph is cloned in this call. - * - * \param phGraphNode - Returns newly created node - * \param hGraph - Graph to which to add the node - * \param dependencies - Dependencies of the node - * \param numDependencies - Number of dependencies - * \param childGraph - The graph to clone into this node - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE, - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphChildGraphNodeGetGraph, - * ::cuGraphCreate, - * ::cuGraphDestroyNode, - * ::cuGraphAddEmptyNode, - * ::cuGraphAddKernelNode, - * ::cuGraphAddHostNode, - * ::cuGraphAddMemcpyNode, - * ::cuGraphAddMemsetNode, - * ::cuGraphClone - */ -CUresult CUDAAPI cuGraphAddChildGraphNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUgraph childGraph); - -/** - * \brief Gets a handle to the embedded graph of a child graph node - * - * Gets a handle to the embedded graph in a child graph node. This call - * does not clone the graph. Changes to the graph will be reflected in - * the node, and the node retains ownership of the graph. - * - * Allocation and free nodes cannot be added to the returned graph. - * Attempting to do so will return an error. - * - * \param hNode - Node to get the embedded graph for - * \param phGraph - Location to store a handle to the graph - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE, - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphAddChildGraphNode, - * ::cuGraphNodeFindInClone - */ -CUresult CUDAAPI cuGraphChildGraphNodeGetGraph(CUgraphNode hNode, CUgraph *phGraph); - -/** - * \brief Creates an empty node and adds it to a graph - * - * Creates a new node which performs no operation, and adds it to \p hGraph with - * \p numDependencies dependencies specified via \p dependencies. - * It is possible for \p numDependencies to be 0, in which case the node will be placed - * at the root of the graph. \p dependencies may not have any duplicate entries. - * A handle to the new node will be returned in \p phGraphNode. - * - * An empty node performs no operation during execution, but can be used for - * transitive ordering. For example, a phased execution graph with 2 groups of n - * nodes with a barrier between them can be represented using an empty node and - * 2*n dependency edges, rather than no empty node and n^2 dependency edges. 
- * - * \param phGraphNode - Returns newly created node - * \param hGraph - Graph to which to add the node - * \param dependencies - Dependencies of the node - * \param numDependencies - Number of dependencies - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE, - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphCreate, - * ::cuGraphDestroyNode, - * ::cuGraphAddChildGraphNode, - * ::cuGraphAddKernelNode, - * ::cuGraphAddHostNode, - * ::cuGraphAddMemcpyNode, - * ::cuGraphAddMemsetNode - */ -CUresult CUDAAPI cuGraphAddEmptyNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies); - -/** - * \brief Creates an event record node and adds it to a graph - * - * Creates a new event record node and adds it to \p hGraph with \p numDependencies - * dependencies specified via \p dependencies and event specified in \p event. - * It is possible for \p numDependencies to be 0, in which case the node will be placed - * at the root of the graph. \p dependencies may not have any duplicate entries. - * A handle to the new node will be returned in \p phGraphNode. - * - * Each launch of the graph will record \p event to capture execution of the - * node's dependencies. - * - * \param phGraphNode - Returns newly created node - * \param hGraph - Graph to which to add the node - * \param dependencies - Dependencies of the node - * \param numDependencies - Number of dependencies - * \param event - Event for the node - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_NOT_SUPPORTED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphAddEventWaitNode, - * ::cuEventRecordWithFlags, - * ::cuStreamWaitEvent, - * ::cuGraphCreate, - * ::cuGraphDestroyNode, - * ::cuGraphAddChildGraphNode, - * ::cuGraphAddEmptyNode, - * ::cuGraphAddKernelNode, - * ::cuGraphAddMemcpyNode, - * ::cuGraphAddMemsetNode, - */ -CUresult CUDAAPI cuGraphAddEventRecordNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUevent event); - -/** - * \brief Returns the event associated with an event record node - * - * Returns the event of event record node \p hNode in \p event_out. - * - * \param hNode - Node to get the event for - * \param event_out - Pointer to return the event - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphAddEventRecordNode, - * ::cuGraphEventRecordNodeSetEvent, - * ::cuGraphEventWaitNodeGetEvent, - * ::cuEventRecordWithFlags, - * ::cuStreamWaitEvent - */ -CUresult CUDAAPI cuGraphEventRecordNodeGetEvent(CUgraphNode hNode, CUevent *event_out); - -/** - * \brief Sets an event record node's event - * - * Sets the event of event record node \p hNode to \p event. 
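The barrier pattern described for empty nodes can be written directly against the declaration above; `producers` and the helper name are placeholders. Each consumer node would then list only the returned barrier node as its dependency, giving 2*n edges instead of n^2.

    #include <cuda.h>

    /* Sketch: join `n` producer nodes through a single empty "barrier" node. */
    CUresult add_barrier(CUgraph graph, const CUgraphNode *producers, size_t n,
                         CUgraphNode *barrierOut)
    {
        return cuGraphAddEmptyNode(barrierOut, graph, producers, n);
    }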
- * - * \param hNode - Node to set the event for - * \param event - Event to use - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_OUT_OF_MEMORY - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphAddEventRecordNode, - * ::cuGraphEventRecordNodeGetEvent, - * ::cuGraphEventWaitNodeSetEvent, - * ::cuEventRecordWithFlags, - * ::cuStreamWaitEvent - */ -CUresult CUDAAPI cuGraphEventRecordNodeSetEvent(CUgraphNode hNode, CUevent event); - -/** - * \brief Creates an event wait node and adds it to a graph - * - * Creates a new event wait node and adds it to \p hGraph with \p numDependencies - * dependencies specified via \p dependencies and event specified in \p event. - * It is possible for \p numDependencies to be 0, in which case the node will be placed - * at the root of the graph. \p dependencies may not have any duplicate entries. - * A handle to the new node will be returned in \p phGraphNode. - * - * The graph node will wait for all work captured in \p event. See ::cuEventRecord() - * for details on what is captured by an event. \p event may be from a different context - * or device than the launch stream. - * - * \param phGraphNode - Returns newly created node - * \param hGraph - Graph to which to add the node - * \param dependencies - Dependencies of the node - * \param numDependencies - Number of dependencies - * \param event - Event for the node - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_NOT_SUPPORTED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphAddEventRecordNode, - * ::cuEventRecordWithFlags, - * ::cuStreamWaitEvent, - * ::cuGraphCreate, - * ::cuGraphDestroyNode, - * ::cuGraphAddChildGraphNode, - * ::cuGraphAddEmptyNode, - * ::cuGraphAddKernelNode, - * ::cuGraphAddMemcpyNode, - * ::cuGraphAddMemsetNode, - */ -CUresult CUDAAPI cuGraphAddEventWaitNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUevent event); - -/** - * \brief Returns the event associated with an event wait node - * - * Returns the event of event wait node \p hNode in \p event_out. - * - * \param hNode - Node to get the event for - * \param event_out - Pointer to return the event - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphAddEventWaitNode, - * ::cuGraphEventWaitNodeSetEvent, - * ::cuGraphEventRecordNodeGetEvent, - * ::cuEventRecordWithFlags, - * ::cuStreamWaitEvent - */ -CUresult CUDAAPI cuGraphEventWaitNodeGetEvent(CUgraphNode hNode, CUevent *event_out); - -/** - * \brief Sets an event wait node's event - * - * Sets the event of event wait node \p hNode to \p event. 
- * - * \param hNode - Node to set the event for - * \param event - Event to use - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_OUT_OF_MEMORY - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphAddEventWaitNode, - * ::cuGraphEventWaitNodeGetEvent, - * ::cuGraphEventRecordNodeSetEvent, - * ::cuEventRecordWithFlags, - * ::cuStreamWaitEvent - */ -CUresult CUDAAPI cuGraphEventWaitNodeSetEvent(CUgraphNode hNode, CUevent event); - -/** - * \brief Creates an external semaphore signal node and adds it to a graph - * - * Creates a new external semaphore signal node and adds it to \p hGraph with \p - * numDependencies dependencies specified via \p dependencies and arguments specified - * in \p nodeParams. It is possible for \p numDependencies to be 0, in which case the - * node will be placed at the root of the graph. \p dependencies may not have any - * duplicate entries. A handle to the new node will be returned in \p phGraphNode. - * - * Performs a signal operation on a set of externally allocated semaphore objects - * when the node is launched. The operation(s) will occur after all of the node's - * dependencies have completed. - * - * \param phGraphNode - Returns newly created node - * \param hGraph - Graph to which to add the node - * \param dependencies - Dependencies of the node - * \param numDependencies - Number of dependencies - * \param nodeParams - Parameters for the node - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_NOT_SUPPORTED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphExternalSemaphoresSignalNodeGetParams, - * ::cuGraphExternalSemaphoresSignalNodeSetParams, - * ::cuGraphExecExternalSemaphoresSignalNodeSetParams, - * ::cuGraphAddExternalSemaphoresWaitNode, - * ::cuImportExternalSemaphore, - * ::cuSignalExternalSemaphoresAsync, - * ::cuWaitExternalSemaphoresAsync, - * ::cuGraphCreate, - * ::cuGraphDestroyNode, - * ::cuGraphAddEventRecordNode, - * ::cuGraphAddEventWaitNode, - * ::cuGraphAddChildGraphNode, - * ::cuGraphAddEmptyNode, - * ::cuGraphAddKernelNode, - * ::cuGraphAddMemcpyNode, - * ::cuGraphAddMemsetNode, - */ -CUresult CUDAAPI cuGraphAddExternalSemaphoresSignalNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS *nodeParams); - -/** - * \brief Returns an external semaphore signal node's parameters - * - * Returns the parameters of an external semaphore signal node \p hNode in \p params_out. - * The \p extSemArray and \p paramsArray returned in \p params_out, - * are owned by the node. This memory remains valid until the node is destroyed or its - * parameters are modified, and should not be modified - * directly. Use ::cuGraphExternalSemaphoresSignalNodeSetParams to update the - * parameters of this node. 
- * - * \param hNode - Node to get the parameters for - * \param params_out - Pointer to return the parameters - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuLaunchKernel, - * ::cuGraphAddExternalSemaphoresSignalNode, - * ::cuGraphExternalSemaphoresSignalNodeSetParams, - * ::cuGraphAddExternalSemaphoresWaitNode, - * ::cuSignalExternalSemaphoresAsync, - * ::cuWaitExternalSemaphoresAsync - */ -CUresult CUDAAPI cuGraphExternalSemaphoresSignalNodeGetParams(CUgraphNode hNode, CUDA_EXT_SEM_SIGNAL_NODE_PARAMS *params_out); - -/** - * \brief Sets an external semaphore signal node's parameters - * - * Sets the parameters of an external semaphore signal node \p hNode to \p nodeParams. - * - * \param hNode - Node to set the parameters for - * \param nodeParams - Parameters to copy - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_OUT_OF_MEMORY - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphAddExternalSemaphoresSignalNode, - * ::cuGraphExternalSemaphoresSignalNodeSetParams, - * ::cuGraphAddExternalSemaphoresWaitNode, - * ::cuSignalExternalSemaphoresAsync, - * ::cuWaitExternalSemaphoresAsync - */ -CUresult CUDAAPI cuGraphExternalSemaphoresSignalNodeSetParams(CUgraphNode hNode, const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS *nodeParams); - -/** - * \brief Creates an external semaphore wait node and adds it to a graph - * - * Creates a new external semaphore wait node and adds it to \p hGraph with \p numDependencies - * dependencies specified via \p dependencies and arguments specified in \p nodeParams. - * It is possible for \p numDependencies to be 0, in which case the node will be placed - * at the root of the graph. \p dependencies may not have any duplicate entries. A handle - * to the new node will be returned in \p phGraphNode. - * - * Performs a wait operation on a set of externally allocated semaphore objects - * when the node is launched. The node's dependencies will not be launched until - * the wait operation has completed. 
- * - * \param phGraphNode - Returns newly created node - * \param hGraph - Graph to which to add the node - * \param dependencies - Dependencies of the node - * \param numDependencies - Number of dependencies - * \param nodeParams - Parameters for the node - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_NOT_SUPPORTED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphExternalSemaphoresWaitNodeGetParams, - * ::cuGraphExternalSemaphoresWaitNodeSetParams, - * ::cuGraphExecExternalSemaphoresWaitNodeSetParams, - * ::cuGraphAddExternalSemaphoresSignalNode, - * ::cuImportExternalSemaphore, - * ::cuSignalExternalSemaphoresAsync, - * ::cuWaitExternalSemaphoresAsync, - * ::cuGraphCreate, - * ::cuGraphDestroyNode, - * ::cuGraphAddEventRecordNode, - * ::cuGraphAddEventWaitNode, - * ::cuGraphAddChildGraphNode, - * ::cuGraphAddEmptyNode, - * ::cuGraphAddKernelNode, - * ::cuGraphAddMemcpyNode, - * ::cuGraphAddMemsetNode, - */ -CUresult CUDAAPI cuGraphAddExternalSemaphoresWaitNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, const CUDA_EXT_SEM_WAIT_NODE_PARAMS *nodeParams); - -/** - * \brief Returns an external semaphore wait node's parameters - * - * Returns the parameters of an external semaphore wait node \p hNode in \p params_out. - * The \p extSemArray and \p paramsArray returned in \p params_out, - * are owned by the node. This memory remains valid until the node is destroyed or its - * parameters are modified, and should not be modified - * directly. Use ::cuGraphExternalSemaphoresSignalNodeSetParams to update the - * parameters of this node. - * - * \param hNode - Node to get the parameters for - * \param params_out - Pointer to return the parameters - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuLaunchKernel, - * ::cuGraphAddExternalSemaphoresWaitNode, - * ::cuGraphExternalSemaphoresWaitNodeSetParams, - * ::cuGraphAddExternalSemaphoresWaitNode, - * ::cuSignalExternalSemaphoresAsync, - * ::cuWaitExternalSemaphoresAsync - */ -CUresult CUDAAPI cuGraphExternalSemaphoresWaitNodeGetParams(CUgraphNode hNode, CUDA_EXT_SEM_WAIT_NODE_PARAMS *params_out); - -/** - * \brief Sets an external semaphore wait node's parameters - * - * Sets the parameters of an external semaphore wait node \p hNode to \p nodeParams. - * - * \param hNode - Node to set the parameters for - * \param nodeParams - Parameters to copy - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_OUT_OF_MEMORY - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphAddExternalSemaphoresWaitNode, - * ::cuGraphExternalSemaphoresWaitNodeSetParams, - * ::cuGraphAddExternalSemaphoresWaitNode, - * ::cuSignalExternalSemaphoresAsync, - * ::cuWaitExternalSemaphoresAsync - */ -CUresult CUDAAPI cuGraphExternalSemaphoresWaitNodeSetParams(CUgraphNode hNode, const CUDA_EXT_SEM_WAIT_NODE_PARAMS *nodeParams); - -/** - * \brief Creates an allocation node and adds it to a graph - * - * Creates a new allocation node and adds it to \p hGraph with \p numDependencies - * dependencies specified via \p dependencies and arguments specified in \p nodeParams. 
- * It is possible for \p numDependencies to be 0, in which case the node will be placed - * at the root of the graph. \p dependencies may not have any duplicate entries. A handle - * to the new node will be returned in \p phGraphNode. - * - * \param phGraphNode - Returns newly created node - * \param hGraph - Graph to which to add the node - * \param dependencies - Dependencies of the node - * \param numDependencies - Number of dependencies - * \param nodeParams - Parameters for the node - * - * When ::cuGraphAddMemAllocNode creates an allocation node, it returns the address of the allocation in - * \param nodeParams.dptr. The allocation's address remains fixed across instantiations and launches. - * - * If the allocation is freed in the same graph, by creating a free node using ::cuGraphAddMemFreeNode, - * the allocation can be accessed by nodes ordered after the allocation node but before the free node. - * These allocations cannot be freed outside the owning graph, and they can only be freed once in the - * owning graph. - * - * If the allocation is not freed in the same graph, then it can be accessed not only by nodes in the - * graph which are ordered after the allocation node, but also by stream operations ordered after the - * graph's execution but before the allocation is freed. - * - * Allocations which are not freed in the same graph can be freed by: - * - passing the allocation to ::cuMemFreeAsync or ::cuMemFree; - * - launching a graph with a free node for that allocation; or - * - specifying ::CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH during instantiation, which makes - * each launch behave as though it called ::cuMemFreeAsync for every unfreed allocation. - * - * It is not possible to free an allocation in both the owning graph and another graph. If the allocation - * is freed in the same graph, a free node cannot be added to another graph. If the allocation is freed - * in another graph, a free node can no longer be added to the owning graph. - * - * The following restrictions apply to graphs which contain allocation and/or memory free nodes: - * - Nodes and edges of the graph cannot be deleted. - * - The graph cannot be used in a child node. - * - Only one instantiation of the graph may exist at any point in time. - * - The graph cannot be cloned. - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_NOT_SUPPORTED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphAddMemFreeNode, - * ::cuGraphMemAllocNodeGetParams, - * ::cuDeviceGraphMemTrim, - * ::cuDeviceGetGraphMemAttribute, - * ::cuDeviceSetGraphMemAttribute, - * ::cuMemAllocAsync, - * ::cuMemFreeAsync, - * ::cuGraphCreate, - * ::cuGraphDestroyNode, - * ::cuGraphAddChildGraphNode, - * ::cuGraphAddEmptyNode, - * ::cuGraphAddEventRecordNode, - * ::cuGraphAddEventWaitNode, - * ::cuGraphAddExternalSemaphoresSignalNode, - * ::cuGraphAddExternalSemaphoresWaitNode, - * ::cuGraphAddKernelNode, - * ::cuGraphAddMemcpyNode, - * ::cuGraphAddMemsetNode - */ -CUresult CUDAAPI cuGraphAddMemAllocNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUDA_MEM_ALLOC_NODE_PARAMS *nodeParams); - -/** - * \brief Returns a memory alloc node's parameters - * - * Returns the parameters of a memory alloc node \p hNode in \p params_out. - * The \p poolProps and \p accessDescs returned in \p params_out, are owned by the - * node. 
This memory remains valid until the node is destroyed. The returned - * parameters must not be modified. - * - * \param hNode - Node to get the parameters for - * \param params_out - Pointer to return the parameters - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphAddMemAllocNode, - * ::cuGraphMemFreeNodeGetParams - */ -CUresult CUDAAPI cuGraphMemAllocNodeGetParams(CUgraphNode hNode, CUDA_MEM_ALLOC_NODE_PARAMS *params_out); - -/** - * \brief Creates a memory free node and adds it to a graph - * - * Creates a new memory free node and adds it to \p hGraph with \p numDependencies - * dependencies specified via \p dependencies and arguments specified in \p nodeParams. - * It is possible for \p numDependencies to be 0, in which case the node will be placed - * at the root of the graph. \p dependencies may not have any duplicate entries. A handle - * to the new node will be returned in \p phGraphNode. - * - * \param phGraphNode - Returns newly created node - * \param hGraph - Graph to which to add the node - * \param dependencies - Dependencies of the node - * \param numDependencies - Number of dependencies - * \param dptr - Address of memory to free - * - * ::cuGraphAddMemFreeNode will return ::CUDA_ERROR_INVALID_VALUE if the user attempts to free: - * - an allocation twice in the same graph. - * - an address that was not returned by an allocation node. - * - an invalid address. - * - * The following restrictions apply to graphs which contain allocation and/or memory free nodes: - * - Nodes and edges of the graph cannot be deleted. - * - The graph cannot be used in a child node. - * - Only one instantiation of the graph may exist at any point in time. - * - The graph cannot be cloned. - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_NOT_SUPPORTED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphAddMemAllocNode, - * ::cuGraphMemFreeNodeGetParams, - * ::cuDeviceGraphMemTrim, - * ::cuDeviceGetGraphMemAttribute, - * ::cuDeviceSetGraphMemAttribute, - * ::cuMemAllocAsync, - * ::cuMemFreeAsync, - * ::cuGraphCreate, - * ::cuGraphDestroyNode, - * ::cuGraphAddChildGraphNode, - * ::cuGraphAddEmptyNode, - * ::cuGraphAddEventRecordNode, - * ::cuGraphAddEventWaitNode, - * ::cuGraphAddExternalSemaphoresSignalNode, - * ::cuGraphAddExternalSemaphoresWaitNode, - * ::cuGraphAddKernelNode, - * ::cuGraphAddMemcpyNode, - * ::cuGraphAddMemsetNode - */ -CUresult CUDAAPI cuGraphAddMemFreeNode(CUgraphNode *phGraphNode, CUgraph hGraph, const CUgraphNode *dependencies, size_t numDependencies, CUdeviceptr dptr); - -/** - * \brief Returns a memory free node's parameters - * - * Returns the address of a memory free node \p hNode in \p dptr_out. - * - * \param hNode - Node to get the parameters for - * \param dptr_out - Pointer to return the device address - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphAddMemFreeNode, - * ::cuGraphMemAllocNodeGetParams - */ -CUresult CUDAAPI cuGraphMemFreeNodeGetParams(CUgraphNode hNode, CUdeviceptr *dptr_out); - -/** - * \brief Free unused memory that was cached on the specified device for use with graphs back to the OS. 
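A sketch of an allocation node paired with a free node in the same graph follows. The CUDA_MEM_ALLOC_NODE_PARAMS and CUmemPoolProps field names (poolProps, location, bytesize, dptr, ...) are assumed from the CUDA 11.4+ headers and should be treated as an assumption, not a definitive reference.

    #include <cuda.h>
    #include <string.h>

    /* Sketch: allocate `bytes` on `device` via an allocation node, then free
     * it in the same graph. Nodes that use the buffer would normally be
     * ordered between the two. */
    CUresult add_alloc_free_pair(CUgraph graph, int device, size_t bytes,
                                 CUgraphNode *allocOut, CUgraphNode *freeOut)
    {
        CUDA_MEM_ALLOC_NODE_PARAMS p;
        CUresult err;

        memset(&p, 0, sizeof(p));
        p.poolProps.allocType     = CU_MEM_ALLOCATION_TYPE_PINNED;
        p.poolProps.location.type = CU_MEM_LOCATION_TYPE_DEVICE;
        p.poolProps.location.id   = device;
        p.bytesize                = bytes;

        /* the allocation's fixed address is returned in p.dptr */
        if ((err = cuGraphAddMemAllocNode(allocOut, graph, NULL, 0, &p)) != CUDA_SUCCESS)
            return err;

        /* free node ordered after the allocation node */
        return cuGraphAddMemFreeNode(freeOut, graph, allocOut, 1, p.dptr);
    }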
- * - * Blocks which are not in use by a graph that is either currently executing or scheduled to execute are - * freed back to the operating system. - * - * \param device - The device for which cached memory should be freed. - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_DEVICE - * - * \sa - * ::cuGraphAddMemAllocNode, - * ::cuGraphAddMemFreeNode - */ -CUresult CUDAAPI cuDeviceGraphMemTrim(CUdevice device); - -/** - * \brief Query asynchronous allocation attributes related to graphs - * - * Valid attributes are: - * - * - ::CU_GRAPH_MEM_ATTR_USED_MEM_CURRENT: Amount of memory, in bytes, currently associated with graphs - * - ::CU_GRAPH_MEM_ATTR_USED_MEM_HIGH: High watermark of memory, in bytes, associated with graphs since the - * last time it was reset. High watermark can only be reset to zero. - * - ::CU_GRAPH_MEM_ATTR_RESERVED_MEM_CURRENT: Amount of memory, in bytes, currently allocated for use by - * the CUDA graphs asynchronous allocator. - * - ::CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH: High watermark of memory, in bytes, currently allocated for use by - * the CUDA graphs asynchronous allocator. - * - * \param device - Specifies the scope of the query - * \param attr - attribute to get - * \param value - retrieved value - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_DEVICE - * - * \sa - * ::cuGraphAddMemAllocNode, - * ::cuGraphAddMemFreeNode - */ -CUresult CUDAAPI cuDeviceGetGraphMemAttribute(CUdevice device, CUgraphMem_attribute attr, void* value); - -/** - * \brief Set asynchronous allocation attributes related to graphs - * - * Valid attributes are: - * - * - ::CU_GRAPH_MEM_ATTR_USED_MEM_HIGH: High watermark of memory, in bytes, associated with graphs since the - * last time it was reset. High watermark can only be reset to zero. - * - ::CU_GRAPH_MEM_ATTR_RESERVED_MEM_HIGH: High watermark of memory, in bytes, currently allocated for use by - * the CUDA graphs asynchronous allocator. - * - * \param device - Specifies the scope of the query - * \param attr - attribute to get - * \param value - pointer to value to set - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_DEVICE - * - * \sa - * ::cuGraphAddMemAllocNode, - * ::cuGraphAddMemFreeNode - */ -CUresult CUDAAPI cuDeviceSetGraphMemAttribute(CUdevice device, CUgraphMem_attribute attr, void* value); - -/** - * \brief Clones a graph - * - * This function creates a copy of \p originalGraph and returns it in \p phGraphClone. - * All parameters are copied into the cloned graph. The original graph may be modified - * after this call without affecting the clone. - * - * Child graph nodes in the original graph are recursively copied into the clone. - * - * \param phGraphClone - Returns newly created cloned graph - * \param originalGraph - Graph to clone - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_OUT_OF_MEMORY - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphCreate, - * ::cuGraphNodeFindInClone - */ -CUresult CUDAAPI cuGraphClone(CUgraph *phGraphClone, CUgraph originalGraph); - -/** - * \brief Finds a cloned version of a node - * - * This function returns the node in \p hClonedGraph corresponding to \p hOriginalNode - * in the original graph. - * - * \p hClonedGraph must have been cloned from \p hOriginalGraph via ::cuGraphClone. - * \p hOriginalNode must have been in \p hOriginalGraph at the time of the call to - * ::cuGraphClone, and the corresponding cloned node in \p hClonedGraph must not have - * been removed. 
The cloned node is then returned via \p phClonedNode. - * - * \param phNode - Returns handle to the cloned node - * \param hOriginalNode - Handle to the original node - * \param hClonedGraph - Cloned graph to query - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphClone - */ -CUresult CUDAAPI cuGraphNodeFindInClone(CUgraphNode *phNode, CUgraphNode hOriginalNode, CUgraph hClonedGraph); - -/** - * \brief Returns a node's type - * - * Returns the node type of \p hNode in \p type. - * - * \param hNode - Node to query - * \param type - Pointer to return the node type - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphGetNodes, - * ::cuGraphGetRootNodes, - * ::cuGraphChildGraphNodeGetGraph, - * ::cuGraphKernelNodeGetParams, - * ::cuGraphKernelNodeSetParams, - * ::cuGraphHostNodeGetParams, - * ::cuGraphHostNodeSetParams, - * ::cuGraphMemcpyNodeGetParams, - * ::cuGraphMemcpyNodeSetParams, - * ::cuGraphMemsetNodeGetParams, - * ::cuGraphMemsetNodeSetParams - */ -CUresult CUDAAPI cuGraphNodeGetType(CUgraphNode hNode, CUgraphNodeType *type); - -/** - * \brief Returns a graph's nodes - * - * Returns a list of \p hGraph's nodes. \p nodes may be NULL, in which case this - * function will return the number of nodes in \p numNodes. Otherwise, - * \p numNodes entries will be filled in. If \p numNodes is higher than the actual - * number of nodes, the remaining entries in \p nodes will be set to NULL, and the - * number of nodes actually obtained will be returned in \p numNodes. - * - * \param hGraph - Graph to query - * \param nodes - Pointer to return the nodes - * \param numNodes - See description - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphCreate, - * ::cuGraphGetRootNodes, - * ::cuGraphGetEdges, - * ::cuGraphNodeGetType, - * ::cuGraphNodeGetDependencies, - * ::cuGraphNodeGetDependentNodes - */ -CUresult CUDAAPI cuGraphGetNodes(CUgraph hGraph, CUgraphNode *nodes, size_t *numNodes); - -/** - * \brief Returns a graph's root nodes - * - * Returns a list of \p hGraph's root nodes. \p rootNodes may be NULL, in which case this - * function will return the number of root nodes in \p numRootNodes. Otherwise, - * \p numRootNodes entries will be filled in. If \p numRootNodes is higher than the actual - * number of root nodes, the remaining entries in \p rootNodes will be set to NULL, and the - * number of nodes actually obtained will be returned in \p numRootNodes. - * - * \param hGraph - Graph to query - * \param rootNodes - Pointer to return the root nodes - * \param numRootNodes - See description - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphCreate, - * ::cuGraphGetNodes, - * ::cuGraphGetEdges, - * ::cuGraphNodeGetType, - * ::cuGraphNodeGetDependencies, - * ::cuGraphNodeGetDependentNodes - */ -CUresult CUDAAPI cuGraphGetRootNodes(CUgraph hGraph, CUgraphNode *rootNodes, size_t *numRootNodes); - -/** - * \brief Returns a graph's dependency edges - * - * Returns a list of \p hGraph's dependency edges. 
Edges are returned via corresponding - * indices in \p from and \p to; that is, the node in \p to[i] has a dependency on the - * node in \p from[i]. \p from and \p to may both be NULL, in which - * case this function only returns the number of edges in \p numEdges. Otherwise, - * \p numEdges entries will be filled in. If \p numEdges is higher than the actual - * number of edges, the remaining entries in \p from and \p to will be set to NULL, and - * the number of edges actually returned will be written to \p numEdges. - * - * \param hGraph - Graph to get the edges from - * \param from - Location to return edge endpoints - * \param to - Location to return edge endpoints - * \param numEdges - See description - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphGetNodes, - * ::cuGraphGetRootNodes, - * ::cuGraphAddDependencies, - * ::cuGraphRemoveDependencies, - * ::cuGraphNodeGetDependencies, - * ::cuGraphNodeGetDependentNodes - */ -CUresult CUDAAPI cuGraphGetEdges(CUgraph hGraph, CUgraphNode *from, CUgraphNode *to, size_t *numEdges); - -/** - * \brief Returns a node's dependencies - * - * Returns a list of \p node's dependencies. \p dependencies may be NULL, in which case this - * function will return the number of dependencies in \p numDependencies. Otherwise, - * \p numDependencies entries will be filled in. If \p numDependencies is higher than the actual - * number of dependencies, the remaining entries in \p dependencies will be set to NULL, and the - * number of nodes actually obtained will be returned in \p numDependencies. - * - * \param hNode - Node to query - * \param dependencies - Pointer to return the dependencies - * \param numDependencies - See description - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphNodeGetDependentNodes, - * ::cuGraphGetNodes, - * ::cuGraphGetRootNodes, - * ::cuGraphGetEdges, - * ::cuGraphAddDependencies, - * ::cuGraphRemoveDependencies - */ -CUresult CUDAAPI cuGraphNodeGetDependencies(CUgraphNode hNode, CUgraphNode *dependencies, size_t *numDependencies); - -/** - * \brief Returns a node's dependent nodes - * - * Returns a list of \p node's dependent nodes. \p dependentNodes may be NULL, in which - * case this function will return the number of dependent nodes in \p numDependentNodes. - * Otherwise, \p numDependentNodes entries will be filled in. If \p numDependentNodes is - * higher than the actual number of dependent nodes, the remaining entries in - * \p dependentNodes will be set to NULL, and the number of nodes actually obtained will - * be returned in \p numDependentNodes. 
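- *
- * For example, the usual two-pass pattern (query the count, then fetch the
- * handles) might look like this; \p hNode is assumed to be a valid graph node
- * and error checking is omitted:
- *
- * \code
- * size_t numDeps = 0;
- * cuGraphNodeGetDependentNodes(hNode, NULL, &numDeps);   // first pass: count only
- * CUgraphNode *deps = (CUgraphNode *)malloc(numDeps * sizeof(CUgraphNode));
- * cuGraphNodeGetDependentNodes(hNode, deps, &numDeps);   // second pass: fill the array
- * \endcode
- *
- * The same two-pass pattern applies to ::cuGraphGetNodes, ::cuGraphGetRootNodes,
- * ::cuGraphGetEdges and ::cuGraphNodeGetDependencies.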
- * - * \param hNode - Node to query - * \param dependentNodes - Pointer to return the dependent nodes - * \param numDependentNodes - See description - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphNodeGetDependencies, - * ::cuGraphGetNodes, - * ::cuGraphGetRootNodes, - * ::cuGraphGetEdges, - * ::cuGraphAddDependencies, - * ::cuGraphRemoveDependencies - */ -CUresult CUDAAPI cuGraphNodeGetDependentNodes(CUgraphNode hNode, CUgraphNode *dependentNodes, size_t *numDependentNodes); - -/** - * \brief Adds dependency edges to a graph - * - * The number of dependencies to be added is defined by \p numDependencies - * Elements in \p from and \p to at corresponding indices define a dependency. - * Each node in \p from and \p to must belong to \p hGraph. - * - * If \p numDependencies is 0, elements in \p from and \p to will be ignored. - * Specifying an existing dependency will return an error. - * - * \param hGraph - Graph to which dependencies are added - * \param from - Array of nodes that provide the dependencies - * \param to - Array of dependent nodes - * \param numDependencies - Number of dependencies to be added - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphRemoveDependencies, - * ::cuGraphGetEdges, - * ::cuGraphNodeGetDependencies, - * ::cuGraphNodeGetDependentNodes - */ -CUresult CUDAAPI cuGraphAddDependencies(CUgraph hGraph, const CUgraphNode *from, const CUgraphNode *to, size_t numDependencies); - -/** - * \brief Removes dependency edges from a graph - * - * The number of \p dependencies to be removed is defined by \p numDependencies. - * Elements in \p from and \p to at corresponding indices define a dependency. - * Each node in \p from and \p to must belong to \p hGraph. - * - * If \p numDependencies is 0, elements in \p from and \p to will be ignored. - * Specifying a non-existing dependency will return an error. - * - * Dependencies cannot be removed from graphs which contain allocation or free nodes. - * Any attempt to do so will return an error. - * - * \param hGraph - Graph from which to remove dependencies - * \param from - Array of nodes that provide the dependencies - * \param to - Array of dependent nodes - * \param numDependencies - Number of dependencies to be removed - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphAddDependencies, - * ::cuGraphGetEdges, - * ::cuGraphNodeGetDependencies, - * ::cuGraphNodeGetDependentNodes - */ -CUresult CUDAAPI cuGraphRemoveDependencies(CUgraph hGraph, const CUgraphNode *from, const CUgraphNode *to, size_t numDependencies); - -/** - * \brief Remove a node from the graph - * - * Removes \p hNode from its graph. This operation also severs any dependencies of other nodes - * on \p hNode and vice versa. - * - * Nodes which belong to a graph which contains allocation or free nodes cannot be destroyed. - * Any attempt to do so will return an error. 
- * - * \param hNode - Node to remove - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphAddChildGraphNode, - * ::cuGraphAddEmptyNode, - * ::cuGraphAddKernelNode, - * ::cuGraphAddHostNode, - * ::cuGraphAddMemcpyNode, - * ::cuGraphAddMemsetNode - */ -CUresult CUDAAPI cuGraphDestroyNode(CUgraphNode hNode); - -/** - * \brief Creates an executable graph from a graph - * - * Instantiates \p hGraph as an executable graph. The graph is validated for any - * structural constraints or intra-node constraints which were not previously - * validated. If instantiation is successful, a handle to the instantiated graph - * is returned in \p phGraphExec. - * - * If there are any errors, diagnostic information may be returned in \p errorNode and - * \p logBuffer. This is the primary way to inspect instantiation errors. The output - * will be null terminated unless the diagnostics overflow - * the buffer. In this case, they will be truncated, and the last byte can be - * inspected to determine if truncation occurred. - * - * \param phGraphExec - Returns instantiated graph - * \param hGraph - Graph to instantiate - * \param phErrorNode - In case of an instantiation error, this may be modified to - * indicate a node contributing to the error - * \param logBuffer - A character buffer to store diagnostic messages - * \param bufferSize - Size of the log buffer in bytes - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphInstantiateWithFlags, - * ::cuGraphCreate, - * ::cuGraphUpload, - * ::cuGraphLaunch, - * ::cuGraphExecDestroy - */ -CUresult CUDAAPI cuGraphInstantiate(CUgraphExec *phGraphExec, CUgraph hGraph, CUgraphNode *phErrorNode, char *logBuffer, size_t bufferSize); - -/** - * \brief Creates an executable graph from a graph - * - * Instantiates \p hGraph as an executable graph. The graph is validated for any - * structural constraints or intra-node constraints which were not previously - * validated. If instantiation is successful, a handle to the instantiated graph - * is returned in \p phGraphExec. - * - * The \p flags parameter controls the behavior of instantiation and subsequent - * graph launches. Valid flags are: - * - * - ::CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH, which configures a - * graph containing memory allocation nodes to automatically free any - * unfreed memory allocations before the graph is relaunched. - * - * If \p hGraph contains any allocation or free nodes, there can be at most one - * executable graph in existence for that graph at a time. - * - * An attempt to instantiate a second executable graph before destroying the first - * with ::cuGraphExecDestroy will result in an error. - * - * \param phGraphExec - Returns instantiated graph - * \param hGraph - Graph to instantiate - * \param flags - Flags to control instantiation. See ::CUgraphInstantiate_flags. 
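- *
- * For illustration, a minimal sketch (\p hGraph and \p hStream are assumed to
- * have been created elsewhere; error checking omitted):
- *
- * \code
- * CUgraphExec graphExec;
- * cuGraphInstantiateWithFlags(&graphExec, hGraph,
- *                             CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH);
- * cuGraphLaunch(graphExec, hStream); // relaunches first free any allocations left
- *                                    // unfreed by the previous launch
- * \endcode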
- * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphInstantiate, - * ::cuGraphCreate, - * ::cuGraphUpload, - * ::cuGraphLaunch, - * ::cuGraphExecDestroy - */ -CUresult CUDAAPI cuGraphInstantiateWithFlags(CUgraphExec *phGraphExec, CUgraph hGraph, unsigned long long flags); - -/** - * \brief Sets the parameters for a kernel node in the given graphExec - * - * Sets the parameters of a kernel node in an executable graph \p hGraphExec. - * The node is identified by the corresponding node \p hNode in the - * non-executable graph, from which the executable graph was instantiated. - * - * \p hNode must not have been removed from the original graph. The \p func field - * of \p nodeParams cannot be modified and must match the original value. - * All other values can be modified. - * - * The modifications only affect future launches of \p hGraphExec. Already - * enqueued or running launches of \p hGraphExec are not affected by this call. - * \p hNode is also not modified by this call. - * - * \param hGraphExec - The executable graph in which to set the specified node - * \param hNode - kernel node from the graph from which graphExec was instantiated - * \param nodeParams - Updated Parameters to set - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphAddKernelNode, - * ::cuGraphKernelNodeSetParams, - * ::cuGraphExecMemcpyNodeSetParams, - * ::cuGraphExecMemsetNodeSetParams, - * ::cuGraphExecHostNodeSetParams, - * ::cuGraphExecChildGraphNodeSetParams, - * ::cuGraphExecEventRecordNodeSetEvent, - * ::cuGraphExecEventWaitNodeSetEvent, - * ::cuGraphExecExternalSemaphoresSignalNodeSetParams, - * ::cuGraphExecExternalSemaphoresWaitNodeSetParams, - * ::cuGraphExecUpdate, - * ::cuGraphInstantiate - */ -CUresult CUDAAPI cuGraphExecKernelNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_KERNEL_NODE_PARAMS *nodeParams); - -/** - * \brief Sets the parameters for a memcpy node in the given graphExec. - * - * Updates the work represented by \p hNode in \p hGraphExec as though \p hNode had - * contained \p copyParams at instantiation. hNode must remain in the graph which was - * used to instantiate \p hGraphExec. Changed edges to and from hNode are ignored. - * - * The source and destination memory in \p copyParams must be allocated from the same - * contexts as the original source and destination memory. Both the instantiation-time - * memory operands and the memory operands in \p copyParams must be 1-dimensional. - * Zero-length operations are not supported. - * - * The modifications only affect future launches of \p hGraphExec. Already enqueued - * or running launches of \p hGraphExec are not affected by this call. hNode is also - * not modified by this call. - * - * Returns CUDA_ERROR_INVALID_VALUE if the memory operands' mappings changed or - * either the original or new memory operands are multidimensional. 
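- *
- * As an illustration, updating the node to perform a different 1D device-to-device
- * copy might look like the following (\p newSrc, \p newDst, \p nbytes and \p ctx are
- * placeholders; error checking omitted):
- *
- * \code
- * CUDA_MEMCPY3D p;
- * memset(&p, 0, sizeof(p));
- * p.srcMemoryType = CU_MEMORYTYPE_DEVICE;  p.srcDevice = newSrc;  p.srcPitch = nbytes;
- * p.dstMemoryType = CU_MEMORYTYPE_DEVICE;  p.dstDevice = newDst;  p.dstPitch = nbytes;
- * p.WidthInBytes  = nbytes;  p.Height = 1;  p.Depth = 1;          // 1-dimensional copy
- * cuGraphExecMemcpyNodeSetParams(hGraphExec, hNode, &p, ctx);
- * \endcode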
- * - * \param hGraphExec - The executable graph in which to set the specified node - * \param hNode - Memcpy node from the graph which was used to instantiate graphExec - * \param copyParams - The updated parameters to set - * \param ctx - Context on which to run the node - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphAddMemcpyNode, - * ::cuGraphMemcpyNodeSetParams, - * ::cuGraphExecKernelNodeSetParams, - * ::cuGraphExecMemsetNodeSetParams, - * ::cuGraphExecHostNodeSetParams, - * ::cuGraphExecChildGraphNodeSetParams, - * ::cuGraphExecEventRecordNodeSetEvent, - * ::cuGraphExecEventWaitNodeSetEvent, - * ::cuGraphExecExternalSemaphoresSignalNodeSetParams, - * ::cuGraphExecExternalSemaphoresWaitNodeSetParams, - * ::cuGraphExecUpdate, - * ::cuGraphInstantiate - */ -CUresult CUDAAPI cuGraphExecMemcpyNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_MEMCPY3D *copyParams, CUcontext ctx); - -/** - * \brief Sets the parameters for a memset node in the given graphExec. - * - * Updates the work represented by \p hNode in \p hGraphExec as though \p hNode had - * contained \p memsetParams at instantiation. hNode must remain in the graph which was - * used to instantiate \p hGraphExec. Changed edges to and from hNode are ignored. - * - * The destination memory in \p memsetParams must be allocated from the same - * contexts as the original destination memory. Both the instantiation-time - * memory operand and the memory operand in \p memsetParams must be 1-dimensional. - * Zero-length operations are not supported. - * - * The modifications only affect future launches of \p hGraphExec. Already enqueued - * or running launches of \p hGraphExec are not affected by this call. hNode is also - * not modified by this call. - * - * Returns CUDA_ERROR_INVALID_VALUE if the memory operand's mappings changed or - * either the original or new memory operand are multidimensional. - * - * \param hGraphExec - The executable graph in which to set the specified node - * \param hNode - Memset node from the graph which was used to instantiate graphExec - * \param memsetParams - The updated parameters to set - * \param ctx - Context on which to run the node - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphAddMemsetNode, - * ::cuGraphMemsetNodeSetParams, - * ::cuGraphExecKernelNodeSetParams, - * ::cuGraphExecMemcpyNodeSetParams, - * ::cuGraphExecHostNodeSetParams, - * ::cuGraphExecChildGraphNodeSetParams, - * ::cuGraphExecEventRecordNodeSetEvent, - * ::cuGraphExecEventWaitNodeSetEvent, - * ::cuGraphExecExternalSemaphoresSignalNodeSetParams, - * ::cuGraphExecExternalSemaphoresWaitNodeSetParams, - * ::cuGraphExecUpdate, - * ::cuGraphInstantiate - */ -CUresult CUDAAPI cuGraphExecMemsetNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_MEMSET_NODE_PARAMS *memsetParams, CUcontext ctx); - -/** - * \brief Sets the parameters for a host node in the given graphExec. - * - * Updates the work represented by \p hNode in \p hGraphExec as though \p hNode had - * contained \p nodeParams at instantiation. hNode must remain in the graph which was - * used to instantiate \p hGraphExec. Changed edges to and from hNode are ignored. - * - * The modifications only affect future launches of \p hGraphExec. Already enqueued - * or running launches of \p hGraphExec are not affected by this call. hNode is also - * not modified by this call. 
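- *
- * A minimal sketch (\p myHostFn and \p newUserData are placeholders for the
- * application's host callback and its argument; error checking omitted):
- *
- * \code
- * CUDA_HOST_NODE_PARAMS hostParams;
- * hostParams.fn       = myHostFn;    // host function executed at this point in the graph
- * hostParams.userData = newUserData; // payload passed to the callback on future launches
- * cuGraphExecHostNodeSetParams(hGraphExec, hNode, &hostParams);
- * \endcode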
- * - * \param hGraphExec - The executable graph in which to set the specified node - * \param hNode - Host node from the graph which was used to instantiate graphExec - * \param nodeParams - The updated parameters to set - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphAddHostNode, - * ::cuGraphHostNodeSetParams, - * ::cuGraphExecKernelNodeSetParams, - * ::cuGraphExecMemcpyNodeSetParams, - * ::cuGraphExecMemsetNodeSetParams, - * ::cuGraphExecChildGraphNodeSetParams, - * ::cuGraphExecEventRecordNodeSetEvent, - * ::cuGraphExecEventWaitNodeSetEvent, - * ::cuGraphExecExternalSemaphoresSignalNodeSetParams, - * ::cuGraphExecExternalSemaphoresWaitNodeSetParams, - * ::cuGraphExecUpdate, - * ::cuGraphInstantiate - */ -CUresult CUDAAPI cuGraphExecHostNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_HOST_NODE_PARAMS *nodeParams); - -/** - * \brief Updates node parameters in the child graph node in the given graphExec. - * - * Updates the work represented by \p hNode in \p hGraphExec as though the nodes contained - * in \p hNode's graph had the parameters contained in \p childGraph's nodes at instantiation. - * \p hNode must remain in the graph which was used to instantiate \p hGraphExec. - * Changed edges to and from \p hNode are ignored. - * - * The modifications only affect future launches of \p hGraphExec. Already enqueued - * or running launches of \p hGraphExec are not affected by this call. \p hNode is also - * not modified by this call. - * - * The topology of \p childGraph, as well as the node insertion order, must match that - * of the graph contained in \p hNode. See ::cuGraphExecUpdate() for a list of restrictions - * on what can be updated in an instantiated graph. The update is recursive, so child graph - * nodes contained within the top level child graph will also be updated. - * - * \param hGraphExec - The executable graph in which to set the specified node - * \param hNode - Host node from the graph which was used to instantiate graphExec - * \param childGraph - The graph supplying the updated parameters - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphAddChildGraphNode, - * ::cuGraphChildGraphNodeGetGraph, - * ::cuGraphExecKernelNodeSetParams, - * ::cuGraphExecMemcpyNodeSetParams, - * ::cuGraphExecMemsetNodeSetParams, - * ::cuGraphExecHostNodeSetParams, - * ::cuGraphExecEventRecordNodeSetEvent, - * ::cuGraphExecEventWaitNodeSetEvent, - * ::cuGraphExecExternalSemaphoresSignalNodeSetParams, - * ::cuGraphExecExternalSemaphoresWaitNodeSetParams, - * ::cuGraphExecUpdate, - * ::cuGraphInstantiate - */ -CUresult CUDAAPI cuGraphExecChildGraphNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, CUgraph childGraph); - -/** - * \brief Sets the event for an event record node in the given graphExec - * - * Sets the event of an event record node in an executable graph \p hGraphExec. - * The node is identified by the corresponding node \p hNode in the - * non-executable graph, from which the executable graph was instantiated. - * - * The modifications only affect future launches of \p hGraphExec. Already - * enqueued or running launches of \p hGraphExec are not affected by this call. - * \p hNode is also not modified by this call. 
- * - * \param hGraphExec - The executable graph in which to set the specified node - * \param hNode - event record node from the graph from which graphExec was instantiated - * \param event - Updated event to use - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphAddEventRecordNode, - * ::cuGraphEventRecordNodeGetEvent, - * ::cuGraphEventWaitNodeSetEvent, - * ::cuEventRecordWithFlags, - * ::cuStreamWaitEvent, - * ::cuGraphExecKernelNodeSetParams, - * ::cuGraphExecMemcpyNodeSetParams, - * ::cuGraphExecMemsetNodeSetParams, - * ::cuGraphExecHostNodeSetParams, - * ::cuGraphExecChildGraphNodeSetParams, - * ::cuGraphExecEventWaitNodeSetEvent, - * ::cuGraphExecExternalSemaphoresSignalNodeSetParams, - * ::cuGraphExecExternalSemaphoresWaitNodeSetParams, - * ::cuGraphExecUpdate, - * ::cuGraphInstantiate - */ -CUresult CUDAAPI cuGraphExecEventRecordNodeSetEvent(CUgraphExec hGraphExec, CUgraphNode hNode, CUevent event); - -/** - * \brief Sets the event for an event wait node in the given graphExec - * - * Sets the event of an event wait node in an executable graph \p hGraphExec. - * The node is identified by the corresponding node \p hNode in the - * non-executable graph, from which the executable graph was instantiated. - * - * The modifications only affect future launches of \p hGraphExec. Already - * enqueued or running launches of \p hGraphExec are not affected by this call. - * \p hNode is also not modified by this call. - * - * \param hGraphExec - The executable graph in which to set the specified node - * \param hNode - event wait node from the graph from which graphExec was instantiated - * \param event - Updated event to use - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphAddEventWaitNode, - * ::cuGraphEventWaitNodeGetEvent, - * ::cuGraphEventRecordNodeSetEvent, - * ::cuEventRecordWithFlags, - * ::cuStreamWaitEvent, - * ::cuGraphExecKernelNodeSetParams, - * ::cuGraphExecMemcpyNodeSetParams, - * ::cuGraphExecMemsetNodeSetParams, - * ::cuGraphExecHostNodeSetParams, - * ::cuGraphExecChildGraphNodeSetParams, - * ::cuGraphExecEventRecordNodeSetEvent, - * ::cuGraphExecExternalSemaphoresSignalNodeSetParams, - * ::cuGraphExecExternalSemaphoresWaitNodeSetParams, - * ::cuGraphExecUpdate, - * ::cuGraphInstantiate - */ -CUresult CUDAAPI cuGraphExecEventWaitNodeSetEvent(CUgraphExec hGraphExec, CUgraphNode hNode, CUevent event); - -/** - * \brief Sets the parameters for an external semaphore signal node in the given graphExec - * - * Sets the parameters of an external semaphore signal node in an executable graph \p hGraphExec. - * The node is identified by the corresponding node \p hNode in the - * non-executable graph, from which the executable graph was instantiated. - * - * \p hNode must not have been removed from the original graph. - * - * The modifications only affect future launches of \p hGraphExec. Already - * enqueued or running launches of \p hGraphExec are not affected by this call. - * \p hNode is also not modified by this call. - * - * Changing \p nodeParams->numExtSems is not supported. 
- * - * \param hGraphExec - The executable graph in which to set the specified node - * \param hNode - semaphore signal node from the graph from which graphExec was instantiated - * \param nodeParams - Updated Parameters to set - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphAddExternalSemaphoresSignalNode, - * ::cuImportExternalSemaphore, - * ::cuSignalExternalSemaphoresAsync, - * ::cuWaitExternalSemaphoresAsync, - * ::cuGraphExecKernelNodeSetParams, - * ::cuGraphExecMemcpyNodeSetParams, - * ::cuGraphExecMemsetNodeSetParams, - * ::cuGraphExecHostNodeSetParams, - * ::cuGraphExecChildGraphNodeSetParams, - * ::cuGraphExecEventRecordNodeSetEvent, - * ::cuGraphExecEventWaitNodeSetEvent, - * ::cuGraphExecExternalSemaphoresWaitNodeSetParams, - * ::cuGraphExecUpdate, - * ::cuGraphInstantiate - */ -CUresult CUDAAPI cuGraphExecExternalSemaphoresSignalNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_EXT_SEM_SIGNAL_NODE_PARAMS *nodeParams); - -/** - * \brief Sets the parameters for an external semaphore wait node in the given graphExec - * - * Sets the parameters of an external semaphore wait node in an executable graph \p hGraphExec. - * The node is identified by the corresponding node \p hNode in the - * non-executable graph, from which the executable graph was instantiated. - * - * \p hNode must not have been removed from the original graph. - * - * The modifications only affect future launches of \p hGraphExec. Already - * enqueued or running launches of \p hGraphExec are not affected by this call. - * \p hNode is also not modified by this call. - * - * Changing \p nodeParams->numExtSems is not supported. - * - * \param hGraphExec - The executable graph in which to set the specified node - * \param hNode - semaphore wait node from the graph from which graphExec was instantiated - * \param nodeParams - Updated Parameters to set - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphAddExternalSemaphoresWaitNode, - * ::cuImportExternalSemaphore, - * ::cuSignalExternalSemaphoresAsync, - * ::cuWaitExternalSemaphoresAsync, - * ::cuGraphExecKernelNodeSetParams, - * ::cuGraphExecMemcpyNodeSetParams, - * ::cuGraphExecMemsetNodeSetParams, - * ::cuGraphExecHostNodeSetParams, - * ::cuGraphExecChildGraphNodeSetParams, - * ::cuGraphExecEventRecordNodeSetEvent, - * ::cuGraphExecEventWaitNodeSetEvent, - * ::cuGraphExecExternalSemaphoresSignalNodeSetParams, - * ::cuGraphExecUpdate, - * ::cuGraphInstantiate - */ -CUresult CUDAAPI cuGraphExecExternalSemaphoresWaitNodeSetParams(CUgraphExec hGraphExec, CUgraphNode hNode, const CUDA_EXT_SEM_WAIT_NODE_PARAMS *nodeParams); - -/** - * \brief Uploads an executable graph in a stream - * - * Uploads \p hGraphExec to the device in \p hStream without executing it. Uploads of - * the same \p hGraphExec will be serialized. Each upload is ordered behind both any - * previous work in \p hStream and any previous launches of \p hGraphExec. - * Uses memory cached by \p stream to back the allocations owned by \p hGraphExec. 
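- *
- * For example, uploading ahead of time separates the one-off setup cost from the
- * launch itself (\p graphExec and \p hStream are assumed to have been created
- * elsewhere; error checking omitted):
- *
- * \code
- * cuGraphUpload(graphExec, hStream); // stage the executable graph on the device
- * // ... unrelated host-side work ...
- * cuGraphLaunch(graphExec, hStream); // ordered after the upload in hStream
- * \endcode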
- * - * \param hGraphExec - Executable graph to upload - * \param hStream - Stream in which to upload the graph - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphInstantiate, - * ::cuGraphLaunch, - * ::cuGraphExecDestroy - */ -CUresult CUDAAPI cuGraphUpload(CUgraphExec hGraphExec, CUstream hStream); - -/** - * \brief Launches an executable graph in a stream - * - * Executes \p hGraphExec in \p hStream. Only one instance of \p hGraphExec may be executing - * at a time. Each launch is ordered behind both any previous work in \p hStream - * and any previous launches of \p hGraphExec. To execute a graph concurrently, it must be - * instantiated multiple times into multiple executable graphs. - * - * If any allocations created by \p hGraphExec remain unfreed (from a previous launch) and - * \p hGraphExec was not instantiated with ::CUDA_GRAPH_INSTANTIATE_FLAG_AUTO_FREE_ON_LAUNCH, - * the launch will fail with ::CUDA_ERROR_INVALID_VALUE. - * - * \param hGraphExec - Executable graph to launch - * \param hStream - Stream in which to launch the graph - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphInstantiate, - * ::cuGraphUpload, - * ::cuGraphExecDestroy - */ -CUresult CUDAAPI cuGraphLaunch(CUgraphExec hGraphExec, CUstream hStream); - -/** - * \brief Destroys an executable graph - * - * Destroys the executable graph specified by \p hGraphExec, as well - * as all of its executable nodes. If the executable graph is - * in-flight, it will not be terminated, but rather freed - * asynchronously on completion. - * - * \param hGraphExec - Executable graph to destroy - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphInstantiate, - * ::cuGraphUpload, - * ::cuGraphLaunch - */ -CUresult CUDAAPI cuGraphExecDestroy(CUgraphExec hGraphExec); - -/** - * \brief Destroys a graph - * - * Destroys the graph specified by \p hGraph, as well as all of its nodes. - * - * \param hGraph - Graph to destroy - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_VALUE - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphCreate - */ -CUresult CUDAAPI cuGraphDestroy(CUgraph hGraph); - -/** - * \brief Check whether an executable graph can be updated with a graph and perform the update if possible - * - * Updates the node parameters in the instantiated graph specified by \p hGraphExec with the - * node parameters in a topologically identical graph specified by \p hGraph. - * - * Limitations: - * - * - Kernel nodes: - * - The owning context of the function cannot change. - * - A node whose function originally did not use CUDA dynamic parallelism cannot be updated - * to a function which uses CDP - * - Memset and memcpy nodes: - * - The CUDA device(s) to which the operand(s) was allocated/mapped cannot change. - * - The source/destination memory must be allocated from the same contexts as the original - * source/destination memory. - * - Only 1D memsets can be changed. - * - Additional memcpy node restrictions: - * - Changing either the source or destination memory type(i.e. 
CU_MEMORYTYPE_DEVICE, - * CU_MEMORYTYPE_ARRAY, etc.) is not supported. - * - External semaphore wait nodes and record nodes: - * - Changing the number of semaphores is not supported. - * - * Note: The API may add further restrictions in future releases. The return code should always be checked. - * - * cuGraphExecUpdate sets \p updateResult_out to CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED under - * the following conditions: - * - * - The count of nodes directly in \p hGraphExec and \p hGraph differ, in which case \p hErrorNode_out - * is NULL. - * - A node is deleted in \p hGraph but not its pair from \p hGraphExec, in which case \p hErrorNode_out - * is NULL. - * - A node is deleted in \p hGraphExec but not its pair from \p hGraph, in which case \p hErrorNode_out is - * the pairless node from \p hGraph. - * - The dependent nodes of a pair differ, in which case \p hErrorNode_out is the node from \p hGraph. - * - * cuGraphExecUpdate sets \p updateResult_out to: - * - CU_GRAPH_EXEC_UPDATE_ERROR if passed an invalid value. - * - CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED if the graph topology changed - * - CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED if the type of a node changed, in which case - * \p hErrorNode_out is set to the node from \p hGraph. - * - CU_GRAPH_EXEC_UPDATE_ERROR_UNSUPPORTED_FUNCTION_CHANGE if the function changed in an unsupported - * way (see note above), in which case \p hErrorNode_out is set to the node from \p hGraph - * - CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED if any parameters to a node changed in a way - * that is not supported, in which case \p hErrorNode_out is set to the node from \p hGraph. - * - CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED if something about a node is unsupported, like - * the node's type or configuration, in which case \p hErrorNode_out is set to the node from \p hGraph - * - * If \p updateResult_out isn't set in one of the situations described above, the update check passes - * and cuGraphExecUpdate updates \p hGraphExec to match the contents of \p hGraph. If an error happens - * during the update, \p updateResult_out will be set to CU_GRAPH_EXEC_UPDATE_ERROR; otherwise, - * \p updateResult_out is set to CU_GRAPH_EXEC_UPDATE_SUCCESS. - * - * cuGraphExecUpdate returns CUDA_SUCCESS when the update was performed successfully. It returns - * CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE if the graph update was not performed because it included - * changes which violated constraints specific to instantiated graph update. - * - * \param hGraphExec The instantiated graph to be updated - * \param hGraph The graph containing the updated parameters - * \param hErrorNode_out The node which caused the permissibility check to forbid the update, if any - * \param updateResult_out Whether the graph update was permitted. If it was forbidden, the reason why - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE, - * \note_graph_thread_safety - * \notefnerr - * - * \sa - * ::cuGraphInstantiate, - */ -CUresult CUDAAPI cuGraphExecUpdate(CUgraphExec hGraphExec, CUgraph hGraph, CUgraphNode *hErrorNode_out, CUgraphExecUpdateResult *updateResult_out); - -/** - * \brief Copies attributes from source node to destination node. - * - * Copies attributes from source node \p src to destination node \p dst. - * Both nodes must have the same context. 
- * - * \param[out] dst Destination node - * \param[in] src Source node - * For list of attributes see ::CUkernelNodeAttrID - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa - * ::CUaccessPolicyWindow - */ -CUresult CUDAAPI cuGraphKernelNodeCopyAttributes(CUgraphNode dst, CUgraphNode src); - -/** - * \brief Queries node attribute. - * - * Queries attribute \p attr from node \p hNode and stores it in corresponding - * member of \p value_out. - * - * \param[in] hNode - * \param[in] attr - * \param[out] value_out - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE - * \notefnerr - * - * \sa - * ::CUaccessPolicyWindow - */ -CUresult CUDAAPI cuGraphKernelNodeGetAttribute(CUgraphNode hNode, CUkernelNodeAttrID attr, - CUkernelNodeAttrValue *value_out); - -/** - * \brief Sets node attribute. - * - * Sets attribute \p attr on node \p hNode from corresponding attribute of - * \p value. - * - * \param[out] hNode - * \param[in] attr - * \param[out] value - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE - * \notefnerr - * - * \sa - * ::CUaccessPolicyWindow - */ -CUresult CUDAAPI cuGraphKernelNodeSetAttribute(CUgraphNode hNode, CUkernelNodeAttrID attr, - const CUkernelNodeAttrValue *value); - -/** - * \brief Write a DOT file describing graph structure - * - * Using the provided \p hGraph, write to \p path a DOT formatted description of the graph. - * By default this includes the graph topology, node types, node id, kernel names and memcpy direction. - * \p flags can be specified to write more detailed information about each node type such as - * parameter values, kernel attributes, node and function handles. - * - * \param hGraph - The graph to create a DOT file from - * \param path - The path to write the DOT file to - * \param flags - Flags from CUgraphDebugDot_flags for specifying which additional node information to write - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_OPERATING_SYSTEM - */ -CUresult CUDAAPI cuGraphDebugDotPrint(CUgraph hGraph, const char *path, unsigned int flags); - -/** - * \brief Create a user object - * - * Create a user object with the specified destructor callback and initial reference count. The - * initial references are owned by the caller. - * - * Destructor callbacks cannot make CUDA API calls and should avoid blocking behavior, as they - * are executed by a shared internal thread. Another thread may be signaled to perform such - * actions, if it does not block forward progress of tasks scheduled through CUDA. - * - * See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. - * - * \param object_out - Location to return the user object handle - * \param ptr - The pointer to pass to the destroy function - * \param destroy - Callback to free the user object when it is no longer in use - * \param initialRefcount - The initial refcount to create the object with, typically 1. The - * initial references are owned by the calling thread. - * \param flags - Currently it is required to pass ::CU_USER_OBJECT_NO_DESTRUCTOR_SYNC, - * which is the only defined flag. This indicates that the destroy - * callback cannot be waited on by any CUDA API. Users requiring - * synchronization of the callback should signal its completion - * manually. 
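- *
- * A minimal sketch tying a heap allocation's lifetime to a graph (\p myState and
- * \p cleanupState are placeholders; the destructor must not call CUDA APIs; error
- * checking omitted):
- *
- * \code
- * // void cleanupState(void *data) { free(data); }
- * CUuserObject obj;
- * cuUserObjectCreate(&obj, myState, cleanupState, 1, CU_USER_OBJECT_NO_DESTRUCTOR_SYNC);
- * cuGraphRetainUserObject(hGraph, obj, 1, CU_GRAPH_USER_OBJECT_MOVE); // graph now owns the reference
- * \endcode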
- * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa - * ::cuUserObjectRetain, - * ::cuUserObjectRelease, - * ::cuGraphRetainUserObject, - * ::cuGraphReleaseUserObject, - * ::cuGraphCreate - */ -CUresult CUDAAPI cuUserObjectCreate(CUuserObject *object_out, void *ptr, CUhostFn destroy, - unsigned int initialRefcount, unsigned int flags); - -/** - * \brief Retain a reference to a user object - * - * Retains new references to a user object. The new references are owned by the caller. - * - * See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. - * - * \param object - The object to retain - * \param count - The number of references to retain, typically 1. Must be nonzero - * and not larger than INT_MAX. - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa - * ::cuUserObjectCreate, - * ::cuUserObjectRelease, - * ::cuGraphRetainUserObject, - * ::cuGraphReleaseUserObject, - * ::cuGraphCreate - */ -CUresult CUDAAPI cuUserObjectRetain(CUuserObject object, unsigned int count); - -/** - * \brief Release a reference to a user object - * - * Releases user object references owned by the caller. The object's destructor is invoked if - * the reference count reaches zero. - * - * It is undefined behavior to release references not owned by the caller, or to use a user - * object handle after all references are released. - * - * See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. - * - * \param object - The object to release - * \param count - The number of references to release, typically 1. Must be nonzero - * and not larger than INT_MAX. - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa - * ::cuUserObjectCreate, - * ::cuUserObjectRetain, - * ::cuGraphRetainUserObject, - * ::cuGraphReleaseUserObject, - * ::cuGraphCreate - */ -CUresult CUDAAPI cuUserObjectRelease(CUuserObject object, unsigned int count); - -/** - * \brief Retain a reference to a user object from a graph - * - * Creates or moves user object references that will be owned by a CUDA graph. - * - * See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. - * - * \param graph - The graph to associate the reference with - * \param object - The user object to retain a reference for - * \param count - The number of references to add to the graph, typically 1. Must be - * nonzero and not larger than INT_MAX. - * \param flags - The optional flag ::CU_GRAPH_USER_OBJECT_MOVE transfers references - * from the calling thread, rather than create new references. Pass 0 - * to create new references. - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa - * ::cuUserObjectCreate, - * ::cuUserObjectRetain, - * ::cuUserObjectRelease, - * ::cuGraphReleaseUserObject, - * ::cuGraphCreate - */ -CUresult CUDAAPI cuGraphRetainUserObject(CUgraph graph, CUuserObject object, unsigned int count, unsigned int flags); - -/** - * \brief Release a user object reference from a graph - * - * Releases user object references owned by a graph. - * - * See CUDA User Objects in the CUDA C++ Programming Guide for more information on user objects. - * - * \param graph - The graph that will release the reference - * \param object - The user object to release a reference for - * \param count - The number of references to release, typically 1. Must be nonzero - * and not larger than INT_MAX. 
- * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa - * ::cuUserObjectCreate, - * ::cuUserObjectRetain, - * ::cuUserObjectRelease, - * ::cuGraphRetainUserObject, - * ::cuGraphCreate - */ -CUresult CUDAAPI cuGraphReleaseUserObject(CUgraph graph, CUuserObject object, unsigned int count); - -/** @} */ /* END CUDA_GRAPH */ - -/** - * \defgroup CUDA_OCCUPANCY Occupancy - * - * ___MANBRIEF___ occupancy calculation functions of the low-level CUDA driver - * API (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the occupancy calculation functions of the low-level CUDA - * driver application programming interface. - * - * @{ - */ - -/** - * \brief Returns occupancy of a function - * - * Returns in \p *numBlocks the maximum number of active blocks per - * streaming multiprocessor. - * - * \param numBlocks - Returned occupancy - * \param func - Kernel for which occupancy is calculated - * \param blockSize - Block size the kernel is intended to be launched with - * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_UNKNOWN - * \notefnerr - * - * \sa - * ::cudaOccupancyMaxActiveBlocksPerMultiprocessor - */ -CUresult CUDAAPI cuOccupancyMaxActiveBlocksPerMultiprocessor(int *numBlocks, CUfunction func, int blockSize, size_t dynamicSMemSize); - -/** - * \brief Returns occupancy of a function - * - * Returns in \p *numBlocks the maximum number of active blocks per - * streaming multiprocessor. - * - * The \p Flags parameter controls how special cases are handled. The - * valid flags are: - * - * - ::CU_OCCUPANCY_DEFAULT, which maintains the default behavior as - * ::cuOccupancyMaxActiveBlocksPerMultiprocessor; - * - * - ::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE, which suppresses the - * default behavior on platforms where global caching affects - * occupancy. On such platforms, if caching is enabled, but - * per-block SM resource usage would result in zero occupancy, the - * occupancy calculator will calculate the occupancy as if caching - * is disabled. Setting ::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE makes - * the occupancy calculator return 0 in such cases. More information - * about this feature can be found in the "Unified L1/Texture Cache" - * section of the Maxwell tuning guide. 
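- *
- * For example, querying occupancy for a hypothetical \p kernelFunc launched with
- * 256-thread blocks and no dynamic shared memory:
- *
- * \code
- * int numBlocks = 0;
- * cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(&numBlocks, kernelFunc,
- *                                                      256, 0,
- *                                                      CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE);
- * \endcode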
- * - * \param numBlocks - Returned occupancy - * \param func - Kernel for which occupancy is calculated - * \param blockSize - Block size the kernel is intended to be launched with - * \param dynamicSMemSize - Per-block dynamic shared memory usage intended, in bytes - * \param flags - Requested behavior for the occupancy calculator - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_UNKNOWN - * \notefnerr - * - * \sa - * ::cudaOccupancyMaxActiveBlocksPerMultiprocessorWithFlags - */ -CUresult CUDAAPI cuOccupancyMaxActiveBlocksPerMultiprocessorWithFlags(int *numBlocks, CUfunction func, int blockSize, size_t dynamicSMemSize, unsigned int flags); - -/** - * \brief Suggest a launch configuration with reasonable occupancy - * - * Returns in \p *blockSize a reasonable block size that can achieve - * the maximum occupancy (or, the maximum number of active warps with - * the fewest blocks per multiprocessor), and in \p *minGridSize the - * minimum grid size to achieve the maximum occupancy. - * - * If \p blockSizeLimit is 0, the configurator will use the maximum - * block size permitted by the device / function instead. - * - * If per-block dynamic shared memory allocation is not needed, the - * user should leave both \p blockSizeToDynamicSMemSize and \p - * dynamicSMemSize as 0. - * - * If per-block dynamic shared memory allocation is needed, then if - * the dynamic shared memory size is constant regardless of block - * size, the size should be passed through \p dynamicSMemSize, and \p - * blockSizeToDynamicSMemSize should be NULL. - * - * Otherwise, if the per-block dynamic shared memory size varies with - * different block sizes, the user needs to provide a unary function - * through \p blockSizeToDynamicSMemSize that computes the dynamic - * shared memory needed by \p func for any given block size. \p - * dynamicSMemSize is ignored. An example signature is: - * - * \code - * // Take block size, returns dynamic shared memory needed - * size_t blockToSmem(int blockSize); - * \endcode - * - * \param minGridSize - Returned minimum grid size needed to achieve the maximum occupancy - * \param blockSize - Returned maximum block size that can achieve the maximum occupancy - * \param func - Kernel for which launch configuration is calculated - * \param blockSizeToDynamicSMemSize - A function that calculates how much per-block dynamic shared memory \p func uses based on the block size - * \param dynamicSMemSize - Dynamic shared memory usage intended, in bytes - * \param blockSizeLimit - The maximum block size \p func is designed to handle - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_UNKNOWN - * \notefnerr - * - * \sa - * ::cudaOccupancyMaxPotentialBlockSize - */ -CUresult CUDAAPI cuOccupancyMaxPotentialBlockSize(int *minGridSize, int *blockSize, CUfunction func, CUoccupancyB2DSize blockSizeToDynamicSMemSize, size_t dynamicSMemSize, int blockSizeLimit); - -/** - * \brief Suggest a launch configuration with reasonable occupancy - * - * An extended version of ::cuOccupancyMaxPotentialBlockSize. In - * addition to arguments passed to ::cuOccupancyMaxPotentialBlockSize, - * ::cuOccupancyMaxPotentialBlockSizeWithFlags also takes a \p Flags - * parameter. - * - * The \p Flags parameter controls how special cases are handled. 
The - * valid flags are: - * - * - ::CU_OCCUPANCY_DEFAULT, which maintains the default behavior as - * ::cuOccupancyMaxPotentialBlockSize; - * - * - ::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE, which suppresses the - * default behavior on platforms where global caching affects - * occupancy. On such platforms, the launch configurations that - * produce maximal occupancy might not support global - * caching. Setting ::CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE - * guarantees that the produced launch configuration is global - * caching compatible at a potential cost of occupancy. More information - * about this feature can be found in the "Unified L1/Texture Cache" - * section of the Maxwell tuning guide. - * - * \param minGridSize - Returned minimum grid size needed to achieve the maximum occupancy - * \param blockSize - Returned maximum block size that can achieve the maximum occupancy - * \param func - Kernel for which launch configuration is calculated - * \param blockSizeToDynamicSMemSize - A function that calculates how much per-block dynamic shared memory \p func uses based on the block size - * \param dynamicSMemSize - Dynamic shared memory usage intended, in bytes - * \param blockSizeLimit - The maximum block size \p func is designed to handle - * \param flags - Options - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_UNKNOWN - * \notefnerr - * - * \sa - * ::cudaOccupancyMaxPotentialBlockSizeWithFlags - */ -CUresult CUDAAPI cuOccupancyMaxPotentialBlockSizeWithFlags(int *minGridSize, int *blockSize, CUfunction func, CUoccupancyB2DSize blockSizeToDynamicSMemSize, size_t dynamicSMemSize, int blockSizeLimit, unsigned int flags); - -/** - * \brief Returns dynamic shared memory available per block when launching \p numBlocks blocks on SM - * - * Returns in \p *dynamicSmemSize the maximum size of dynamic shared memory to allow \p numBlocks blocks per SM. - * - * \param dynamicSmemSize - Returned maximum dynamic shared memory - * \param func - Kernel function for which occupancy is calculated - * \param numBlocks - Number of blocks to fit on SM - * \param blockSize - Size of the blocks - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_UNKNOWN - * \notefnerr - * - * \sa - */ -CUresult CUDAAPI cuOccupancyAvailableDynamicSMemPerBlock(size_t *dynamicSmemSize, CUfunction func, int numBlocks, int blockSize); - -/** @} */ /* END CUDA_OCCUPANCY */ - -/** - * \defgroup CUDA_TEXREF_DEPRECATED Texture Reference Management [DEPRECATED] - * - * ___MANBRIEF___ deprecated texture reference management functions of the - * low-level CUDA driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the deprecated texture reference management - * functions of the low-level CUDA driver application programming interface. - * - * @{ - */ - -/** - * \brief Binds an array as a texture reference - * - * \deprecated - * - * Binds the CUDA array \p hArray to the texture reference \p hTexRef. Any - * previous address or CUDA array state associated with the texture reference - * is superseded by this function. \p Flags must be set to - * ::CU_TRSA_OVERRIDE_FORMAT. Any CUDA array previously bound to \p hTexRef is - * unbound. 
- * - * \param hTexRef - Texture reference to bind - * \param hArray - Array to bind - * \param Flags - Options (must be ::CU_TRSA_OVERRIDE_FORMAT) - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefSetAddress, - * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, - * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, - * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, - * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat, - * ::cudaBindTextureToArray - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetArray(CUtexref hTexRef, CUarray hArray, unsigned int Flags); - -/** - * \brief Binds a mipmapped array to a texture reference - * - * \deprecated - * - * Binds the CUDA mipmapped array \p hMipmappedArray to the texture reference \p hTexRef. - * Any previous address or CUDA array state associated with the texture reference - * is superseded by this function. \p Flags must be set to ::CU_TRSA_OVERRIDE_FORMAT. - * Any CUDA array previously bound to \p hTexRef is unbound. - * - * \param hTexRef - Texture reference to bind - * \param hMipmappedArray - Mipmapped array to bind - * \param Flags - Options (must be ::CU_TRSA_OVERRIDE_FORMAT) - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefSetAddress, - * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, - * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, - * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, - * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat, - * ::cudaBindTextureToMipmappedArray - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetMipmappedArray(CUtexref hTexRef, CUmipmappedArray hMipmappedArray, unsigned int Flags); - -/** - * \brief Binds an address as a texture reference - * - * \deprecated - * - * Binds a linear address range to the texture reference \p hTexRef. Any - * previous address or CUDA array state associated with the texture reference - * is superseded by this function. Any memory previously bound to \p hTexRef - * is unbound. - * - * Since the hardware enforces an alignment requirement on texture base - * addresses, ::cuTexRefSetAddress() passes back a byte offset in - * \p *ByteOffset that must be applied to texture fetches in order to read from - * the desired memory. This offset must be divided by the texel size and - * passed to kernels that read from the texture so they can be applied to the - * ::tex1Dfetch() function. - * - * If the device memory pointer was returned from ::cuMemAlloc(), the offset - * is guaranteed to be 0 and NULL may be passed as the \p ByteOffset parameter. - * - * The total number of elements (or texels) in the linear address range - * cannot exceed ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH. - * The number of elements is computed as (\p bytes / bytesPerElement), - * where bytesPerElement is determined from the data format and number of - * components set using ::cuTexRefSetFormat(). 
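- *
- * For illustration, binding a buffer of floats and computing the texel offset to
- * add to ::tex1Dfetch() indices (\p devPtr and \p numFloats are placeholders;
- * error checking omitted):
- *
- * \code
- * size_t byteOffset = 0;
- * cuTexRefSetFormat(hTexRef, CU_AD_FORMAT_FLOAT, 1);   // one float per texel
- * cuTexRefSetAddress(&byteOffset, hTexRef, devPtr, numFloats * sizeof(float));
- * size_t texelOffset = byteOffset / sizeof(float);     // add this to kernel fetch indices
- * \endcode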
- * - * \param ByteOffset - Returned byte offset - * \param hTexRef - Texture reference to bind - * \param dptr - Device pointer to bind - * \param bytes - Size of memory to bind in bytes - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, - * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, - * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, - * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat, - * ::cudaBindTexture - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetAddress(size_t *ByteOffset, CUtexref hTexRef, CUdeviceptr dptr, size_t bytes); - -/** - * \brief Binds an address as a 2D texture reference - * - * \deprecated - * - * Binds a linear address range to the texture reference \p hTexRef. Any - * previous address or CUDA array state associated with the texture reference - * is superseded by this function. Any memory previously bound to \p hTexRef - * is unbound. - * - * Using a ::tex2D() function inside a kernel requires a call to either - * ::cuTexRefSetArray() to bind the corresponding texture reference to an - * array, or ::cuTexRefSetAddress2D() to bind the texture reference to linear - * memory. - * - * Function calls to ::cuTexRefSetFormat() cannot follow calls to - * ::cuTexRefSetAddress2D() for the same texture reference. - * - * It is required that \p dptr be aligned to the appropriate hardware-specific - * texture alignment. You can query this value using the device attribute - * ::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT. If an unaligned \p dptr is - * supplied, ::CUDA_ERROR_INVALID_VALUE is returned. - * - * \p Pitch has to be aligned to the hardware-specific texture pitch alignment. - * This value can be queried using the device attribute - * ::CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT. If an unaligned \p Pitch is - * supplied, ::CUDA_ERROR_INVALID_VALUE is returned. - * - * Width and Height, which are specified in elements (or texels), cannot exceed - * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH and - * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT respectively. - * \p Pitch, which is specified in bytes, cannot exceed - * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH. - * - * \param hTexRef - Texture reference to bind - * \param desc - Descriptor of CUDA array - * \param dptr - Device pointer to bind - * \param Pitch - Line pitch in bytes - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefSetAddress, - * ::cuTexRefSetAddressMode, ::cuTexRefSetArray, - * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, - * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, - * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat, - * ::cudaBindTexture2D - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetAddress2D(CUtexref hTexRef, const CUDA_ARRAY_DESCRIPTOR *desc, CUdeviceptr dptr, size_t Pitch); - -/** - * \brief Sets the format for a texture reference - * - * \deprecated - * - * Specifies the format of the data to be read by the texture reference - * \p hTexRef. 
\p fmt and \p NumPackedComponents are exactly analogous to the - * ::Format and ::NumChannels members of the ::CUDA_ARRAY_DESCRIPTOR structure: - * They specify the format of each component and the number of components per - * array element. - * - * \param hTexRef - Texture reference - * \param fmt - Format to set - * \param NumPackedComponents - Number of components per array element - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefSetAddress, - * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, - * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, - * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, - * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat, - * ::cudaCreateChannelDesc, - * ::cudaBindTexture, - * ::cudaBindTexture2D, - * ::cudaBindTextureToArray, - * ::cudaBindTextureToMipmappedArray - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetFormat(CUtexref hTexRef, CUarray_format fmt, int NumPackedComponents); - -/** - * \brief Sets the addressing mode for a texture reference - * - * \deprecated - * - * Specifies the addressing mode \p am for the given dimension \p dim of the - * texture reference \p hTexRef. If \p dim is zero, the addressing mode is - * applied to the first parameter of the functions used to fetch from the - * texture; if \p dim is 1, the second, and so on. ::CUaddress_mode is defined - * as: - * \code - typedef enum CUaddress_mode_enum { - CU_TR_ADDRESS_MODE_WRAP = 0, - CU_TR_ADDRESS_MODE_CLAMP = 1, - CU_TR_ADDRESS_MODE_MIRROR = 2, - CU_TR_ADDRESS_MODE_BORDER = 3 - } CUaddress_mode; - * \endcode - * - * Note that this call has no effect if \p hTexRef is bound to linear memory. - * Also, if the flag, ::CU_TRSF_NORMALIZED_COORDINATES, is not set, the only - * supported address mode is ::CU_TR_ADDRESS_MODE_CLAMP. - * - * \param hTexRef - Texture reference - * \param dim - Dimension - * \param am - Addressing mode to set - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefSetAddress, - * ::cuTexRefSetAddress2D, ::cuTexRefSetArray, - * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, - * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, - * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat, - * ::cudaBindTexture, - * ::cudaBindTexture2D, - * ::cudaBindTextureToArray, - * ::cudaBindTextureToMipmappedArray - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetAddressMode(CUtexref hTexRef, int dim, CUaddress_mode am); - -/** - * \brief Sets the filtering mode for a texture reference - * - * \deprecated - * - * Specifies the filtering mode \p fm to be used when reading memory through - * the texture reference \p hTexRef. ::CUfilter_mode_enum is defined as: - * - * \code - typedef enum CUfilter_mode_enum { - CU_TR_FILTER_MODE_POINT = 0, - CU_TR_FILTER_MODE_LINEAR = 1 - } CUfilter_mode; - * \endcode - * - * Note that this call has no effect if \p hTexRef is bound to linear memory. 
- * - * \param hTexRef - Texture reference - * \param fm - Filtering mode to set - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefSetAddress, - * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, - * ::cuTexRefSetFlags, ::cuTexRefSetFormat, - * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, - * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat, - * ::cudaBindTextureToArray - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetFilterMode(CUtexref hTexRef, CUfilter_mode fm); - -/** - * \brief Sets the mipmap filtering mode for a texture reference - * - * \deprecated - * - * Specifies the mipmap filtering mode \p fm to be used when reading memory through - * the texture reference \p hTexRef. ::CUfilter_mode_enum is defined as: - * - * \code - typedef enum CUfilter_mode_enum { - CU_TR_FILTER_MODE_POINT = 0, - CU_TR_FILTER_MODE_LINEAR = 1 - } CUfilter_mode; - * \endcode - * - * Note that this call has no effect if \p hTexRef is not bound to a mipmapped array. - * - * \param hTexRef - Texture reference - * \param fm - Filtering mode to set - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefSetAddress, - * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, - * ::cuTexRefSetFlags, ::cuTexRefSetFormat, - * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, - * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat, - * ::cudaBindTextureToMipmappedArray - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetMipmapFilterMode(CUtexref hTexRef, CUfilter_mode fm); - -/** - * \brief Sets the mipmap level bias for a texture reference - * - * \deprecated - * - * Specifies the mipmap level bias \p bias to be added to the specified mipmap level when - * reading memory through the texture reference \p hTexRef. - * - * Note that this call has no effect if \p hTexRef is not bound to a mipmapped array. - * - * \param hTexRef - Texture reference - * \param bias - Mipmap level bias - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefSetAddress, - * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, - * ::cuTexRefSetFlags, ::cuTexRefSetFormat, - * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, - * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat, - * ::cudaBindTextureToMipmappedArray - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetMipmapLevelBias(CUtexref hTexRef, float bias); - -/** - * \brief Sets the mipmap min/max mipmap level clamps for a texture reference - * - * \deprecated - * - * Specifies the min/max mipmap level clamps, \p minMipmapLevelClamp and \p maxMipmapLevelClamp - * respectively, to be used when reading memory through the texture reference - * \p hTexRef. - * - * Note that this call has no effect if \p hTexRef is not bound to a mipmapped array. 
- * - * \param hTexRef - Texture reference - * \param minMipmapLevelClamp - Mipmap min level clamp - * \param maxMipmapLevelClamp - Mipmap max level clamp - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefSetAddress, - * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, - * ::cuTexRefSetFlags, ::cuTexRefSetFormat, - * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, - * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat, - * ::cudaBindTextureToMipmappedArray - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetMipmapLevelClamp(CUtexref hTexRef, float minMipmapLevelClamp, float maxMipmapLevelClamp); - -/** - * \brief Sets the maximum anisotropy for a texture reference - * - * \deprecated - * - * Specifies the maximum anisotropy \p maxAniso to be used when reading memory through - * the texture reference \p hTexRef. - * - * Note that this call has no effect if \p hTexRef is bound to linear memory. - * - * \param hTexRef - Texture reference - * \param maxAniso - Maximum anisotropy - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefSetAddress, - * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, - * ::cuTexRefSetFlags, ::cuTexRefSetFormat, - * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, - * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat, - * ::cudaBindTextureToArray, - * ::cudaBindTextureToMipmappedArray - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetMaxAnisotropy(CUtexref hTexRef, unsigned int maxAniso); - -/** - * \brief Sets the border color for a texture reference - * - * \deprecated - * - * Specifies the value of the RGBA color via the \p pBorderColor to the texture reference - * \p hTexRef. The color value supports only float type and holds color components in - * the following sequence: - * pBorderColor[0] holds 'R' component - * pBorderColor[1] holds 'G' component - * pBorderColor[2] holds 'B' component - * pBorderColor[3] holds 'A' component - * - * Note that the color values can be set only when the Address mode is set to - * CU_TR_ADDRESS_MODE_BORDER using ::cuTexRefSetAddressMode. - * Applications using integer border color values have to "reinterpret_cast" their values to float. - * - * \param hTexRef - Texture reference - * \param pBorderColor - RGBA color - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefSetAddressMode, - * ::cuTexRefGetAddressMode, ::cuTexRefGetBorderColor, - * ::cudaBindTexture, - * ::cudaBindTexture2D, - * ::cudaBindTextureToArray, - * ::cudaBindTextureToMipmappedArray - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetBorderColor(CUtexref hTexRef, float *pBorderColor); - -/** - * \brief Sets the flags for a texture reference - * - * \deprecated - * - * Specifies optional flags via \p Flags to specify the behavior of data - * returned through the texture reference \p hTexRef. The valid flags are: - * - * - ::CU_TRSF_READ_AS_INTEGER, which suppresses the default behavior of - * having the texture promote integer data to floating point data in the - * range [0, 1]. 
Note that texture with 32-bit integer format - * would not be promoted, regardless of whether or not this - * flag is specified; - * - ::CU_TRSF_NORMALIZED_COORDINATES, which suppresses the - * default behavior of having the texture coordinates range - * from [0, Dim) where Dim is the width or height of the CUDA - * array. Instead, the texture coordinates [0, 1.0) reference - * the entire breadth of the array dimension; - * - ::CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION, which disables any trilinear - * filtering optimizations. Trilinear optimizations improve texture filtering - * performance by allowing bilinear filtering on textures in scenarios where - * it can closely approximate the expected results. - * - * \param hTexRef - Texture reference - * \param Flags - Optional flags to set - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefSetAddress, - * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, - * ::cuTexRefSetFilterMode, ::cuTexRefSetFormat, - * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, - * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat, - * ::cudaBindTexture, - * ::cudaBindTexture2D, - * ::cudaBindTextureToArray, - * ::cudaBindTextureToMipmappedArray - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefSetFlags(CUtexref hTexRef, unsigned int Flags); - -/** - * \brief Gets the address associated with a texture reference - * - * \deprecated - * - * Returns in \p *pdptr the base address bound to the texture reference - * \p hTexRef, or returns ::CUDA_ERROR_INVALID_VALUE if the texture reference - * is not bound to any device memory range. - * - * \param pdptr - Returned device address - * \param hTexRef - Texture reference - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefSetAddress, - * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, - * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, - * ::cuTexRefGetAddressMode, ::cuTexRefGetArray, - * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetAddress(CUdeviceptr *pdptr, CUtexref hTexRef); - -/** - * \brief Gets the array bound to a texture reference - * - * \deprecated - * - * Returns in \p *phArray the CUDA array bound to the texture reference - * \p hTexRef, or returns ::CUDA_ERROR_INVALID_VALUE if the texture reference - * is not bound to any CUDA array. 
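Taken together, the deprecated texture-reference setters above were typically used as in the following sketch, which assumes `texRef` was looked up for the kernel's texture (e.g. with cuModuleGetTexRef) and `hArray` holds single-channel float data:

#include <cuda.h>

/* Sketch only: bind a float CUDA array to a legacy texture reference. */
static CUresult bind_texref(CUtexref texRef, CUarray hArray)
{
    CUresult err = cuTexRefSetArray(texRef, hArray, CU_TRSA_OVERRIDE_FORMAT);
    if (err != CUDA_SUCCESS)
        return err;
    cuTexRefSetFormat(texRef, CU_AD_FORMAT_FLOAT, 1);
    cuTexRefSetAddressMode(texRef, 0, CU_TR_ADDRESS_MODE_CLAMP);
    cuTexRefSetFilterMode(texRef, CU_TR_FILTER_MODE_LINEAR);
    /* Fetch with normalized [0, 1) texture coordinates. */
    return cuTexRefSetFlags(texRef, CU_TRSF_NORMALIZED_COORDINATES);
}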
- * - * \param phArray - Returned array - * \param hTexRef - Texture reference - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefSetAddress, - * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, - * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, - * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, - * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetArray(CUarray *phArray, CUtexref hTexRef); - -/** - * \brief Gets the mipmapped array bound to a texture reference - * - * \deprecated - * - * Returns in \p *phMipmappedArray the CUDA mipmapped array bound to the texture - * reference \p hTexRef, or returns ::CUDA_ERROR_INVALID_VALUE if the texture reference - * is not bound to any CUDA mipmapped array. - * - * \param phMipmappedArray - Returned mipmapped array - * \param hTexRef - Texture reference - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefSetAddress, - * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, - * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, - * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, - * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetMipmappedArray(CUmipmappedArray *phMipmappedArray, CUtexref hTexRef); - -/** - * \brief Gets the addressing mode used by a texture reference - * - * \deprecated - * - * Returns in \p *pam the addressing mode corresponding to the - * dimension \p dim of the texture reference \p hTexRef. Currently, the only - * valid value for \p dim are 0 and 1. - * - * \param pam - Returned addressing mode - * \param hTexRef - Texture reference - * \param dim - Dimension - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefSetAddress, - * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, - * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, - * ::cuTexRefGetAddress, ::cuTexRefGetArray, - * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetAddressMode(CUaddress_mode *pam, CUtexref hTexRef, int dim); - -/** - * \brief Gets the filter-mode used by a texture reference - * - * \deprecated - * - * Returns in \p *pfm the filtering mode of the texture reference - * \p hTexRef. 
- * - * \param pfm - Returned filtering mode - * \param hTexRef - Texture reference - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefSetAddress, - * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, - * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, - * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, - * ::cuTexRefGetFlags, ::cuTexRefGetFormat - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetFilterMode(CUfilter_mode *pfm, CUtexref hTexRef); - -/** - * \brief Gets the format used by a texture reference - * - * \deprecated - * - * Returns in \p *pFormat and \p *pNumChannels the format and number - * of components of the CUDA array bound to the texture reference \p hTexRef. - * If \p pFormat or \p pNumChannels is NULL, it will be ignored. - * - * \param pFormat - Returned format - * \param pNumChannels - Returned number of components - * \param hTexRef - Texture reference - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefSetAddress, - * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, - * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, - * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, - * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetFormat(CUarray_format *pFormat, int *pNumChannels, CUtexref hTexRef); - -/** - * \brief Gets the mipmap filtering mode for a texture reference - * - * \deprecated - * - * Returns the mipmap filtering mode in \p pfm that's used when reading memory through - * the texture reference \p hTexRef. - * - * \param pfm - Returned mipmap filtering mode - * \param hTexRef - Texture reference - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefSetAddress, - * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, - * ::cuTexRefSetFlags, ::cuTexRefSetFormat, - * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, - * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetMipmapFilterMode(CUfilter_mode *pfm, CUtexref hTexRef); - -/** - * \brief Gets the mipmap level bias for a texture reference - * - * \deprecated - * - * Returns the mipmap level bias in \p pBias that's added to the specified mipmap - * level when reading memory through the texture reference \p hTexRef. 
- * - * \param pbias - Returned mipmap level bias - * \param hTexRef - Texture reference - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefSetAddress, - * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, - * ::cuTexRefSetFlags, ::cuTexRefSetFormat, - * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, - * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetMipmapLevelBias(float *pbias, CUtexref hTexRef); - -/** - * \brief Gets the min/max mipmap level clamps for a texture reference - * - * \deprecated - * - * Returns the min/max mipmap level clamps in \p pminMipmapLevelClamp and \p pmaxMipmapLevelClamp - * that's used when reading memory through the texture reference \p hTexRef. - * - * \param pminMipmapLevelClamp - Returned mipmap min level clamp - * \param pmaxMipmapLevelClamp - Returned mipmap max level clamp - * \param hTexRef - Texture reference - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefSetAddress, - * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, - * ::cuTexRefSetFlags, ::cuTexRefSetFormat, - * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, - * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetMipmapLevelClamp(float *pminMipmapLevelClamp, float *pmaxMipmapLevelClamp, CUtexref hTexRef); - -/** - * \brief Gets the maximum anisotropy for a texture reference - * - * \deprecated - * - * Returns the maximum anisotropy in \p pmaxAniso that's used when reading memory through - * the texture reference \p hTexRef. - * - * \param pmaxAniso - Returned maximum anisotropy - * \param hTexRef - Texture reference - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefSetAddress, - * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, - * ::cuTexRefSetFlags, ::cuTexRefSetFormat, - * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, - * ::cuTexRefGetFilterMode, ::cuTexRefGetFlags, ::cuTexRefGetFormat - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetMaxAnisotropy(int *pmaxAniso, CUtexref hTexRef); - -/** - * \brief Gets the border color used by a texture reference - * - * \deprecated - * - * Returns in \p pBorderColor, values of the RGBA color used by - * the texture reference \p hTexRef. 
- * The color value is of type float and holds color components in - * the following sequence: - * pBorderColor[0] holds 'R' component - * pBorderColor[1] holds 'G' component - * pBorderColor[2] holds 'B' component - * pBorderColor[3] holds 'A' component - * - * \param hTexRef - Texture reference - * \param pBorderColor - Returned Type and Value of RGBA color - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefSetAddressMode, - * ::cuTexRefSetAddressMode, ::cuTexRefSetBorderColor - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetBorderColor(float *pBorderColor, CUtexref hTexRef); - -/** - * \brief Gets the flags used by a texture reference - * - * \deprecated - * - * Returns in \p *pFlags the flags of the texture reference \p hTexRef. - * - * \param pFlags - Returned flags - * \param hTexRef - Texture reference - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefSetAddress, - * ::cuTexRefSetAddress2D, ::cuTexRefSetAddressMode, ::cuTexRefSetArray, - * ::cuTexRefSetFilterMode, ::cuTexRefSetFlags, ::cuTexRefSetFormat, - * ::cuTexRefGetAddress, ::cuTexRefGetAddressMode, ::cuTexRefGetArray, - * ::cuTexRefGetFilterMode, ::cuTexRefGetFormat - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefGetFlags(unsigned int *pFlags, CUtexref hTexRef); - -/** - * \brief Creates a texture reference - * - * \deprecated - * - * Creates a texture reference and returns its handle in \p *pTexRef. Once - * created, the application must call ::cuTexRefSetArray() or - * ::cuTexRefSetAddress() to associate the reference with allocated memory. - * Other texture reference functions are used to specify the format and - * interpretation (addressing, filtering, etc.) to be used when the memory is - * read through this texture reference. - * - * \param pTexRef - Returned texture reference - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefDestroy - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefCreate(CUtexref *pTexRef); - -/** - * \brief Destroys a texture reference - * - * \deprecated - * - * Destroys the texture reference specified by \p hTexRef. - * - * \param hTexRef - Texture reference to destroy - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuTexRefCreate - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuTexRefDestroy(CUtexref hTexRef); - -/** @} */ /* END CUDA_TEXREF_DEPRECATED */ - - -/** - * \defgroup CUDA_SURFREF_DEPRECATED Surface Reference Management [DEPRECATED] - * - * ___MANBRIEF___ surface reference management functions of the low-level CUDA - * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the surface reference management functions of the - * low-level CUDA driver application programming interface. - * - * @{ - */ - -/** - * \brief Sets the CUDA array for a surface reference. - * - * \deprecated - * - * Sets the CUDA array \p hArray to be read and written by the surface reference - * \p hSurfRef. Any previous CUDA array state associated with the surface - * reference is superseded by this function. \p Flags must be set to 0. 
- * The ::CUDA_ARRAY3D_SURFACE_LDST flag must have been set for the CUDA array. - * Any CUDA array previously bound to \p hSurfRef is unbound. - - * \param hSurfRef - Surface reference handle - * \param hArray - CUDA array handle - * \param Flags - set to 0 - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa - * ::cuModuleGetSurfRef, - * ::cuSurfRefGetArray, - * ::cudaBindSurfaceToArray - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuSurfRefSetArray(CUsurfref hSurfRef, CUarray hArray, unsigned int Flags); - -/** - * \brief Passes back the CUDA array bound to a surface reference. - * - * \deprecated - * - * Returns in \p *phArray the CUDA array bound to the surface reference - * \p hSurfRef, or returns ::CUDA_ERROR_INVALID_VALUE if the surface reference - * is not bound to any CUDA array. - - * \param phArray - Surface reference handle - * \param hSurfRef - Surface reference handle - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa ::cuModuleGetSurfRef, ::cuSurfRefSetArray - */ -__CUDA_DEPRECATED CUresult CUDAAPI cuSurfRefGetArray(CUarray *phArray, CUsurfref hSurfRef); - -/** @} */ /* END CUDA_SURFREF_DEPRECATED */ - -/** - * \defgroup CUDA_TEXOBJECT Texture Object Management - * - * ___MANBRIEF___ texture object management functions of the low-level CUDA - * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the texture object management functions of the - * low-level CUDA driver application programming interface. The texture - * object API is only supported on devices of compute capability 3.0 or higher. - * - * @{ - */ - -/** - * \brief Creates a texture object - * - * Creates a texture object and returns it in \p pTexObject. \p pResDesc describes - * the data to texture from. \p pTexDesc describes how the data should be sampled. - * \p pResViewDesc is an optional argument that specifies an alternate format for - * the data described by \p pResDesc, and also describes the subresource region - * to restrict access to when texturing. \p pResViewDesc can only be specified if - * the type of resource is a CUDA array or a CUDA mipmapped array. - * - * Texture objects are only supported on devices of compute capability 3.0 or higher. - * Additionally, a texture object is an opaque value, and, as such, should only be - * accessed through CUDA API calls. - * - * The ::CUDA_RESOURCE_DESC structure is defined as: - * \code - typedef struct CUDA_RESOURCE_DESC_st - { - CUresourcetype resType; - - union { - struct { - CUarray hArray; - } array; - struct { - CUmipmappedArray hMipmappedArray; - } mipmap; - struct { - CUdeviceptr devPtr; - CUarray_format format; - unsigned int numChannels; - size_t sizeInBytes; - } linear; - struct { - CUdeviceptr devPtr; - CUarray_format format; - unsigned int numChannels; - size_t width; - size_t height; - size_t pitchInBytes; - } pitch2D; - } res; - - unsigned int flags; - } CUDA_RESOURCE_DESC; - - * \endcode - * where: - * - ::CUDA_RESOURCE_DESC::resType specifies the type of resource to texture from. 
- * CUresourceType is defined as: - * \code - typedef enum CUresourcetype_enum { - CU_RESOURCE_TYPE_ARRAY = 0x00, - CU_RESOURCE_TYPE_MIPMAPPED_ARRAY = 0x01, - CU_RESOURCE_TYPE_LINEAR = 0x02, - CU_RESOURCE_TYPE_PITCH2D = 0x03 - } CUresourcetype; - * \endcode - * - * \par - * If ::CUDA_RESOURCE_DESC::resType is set to ::CU_RESOURCE_TYPE_ARRAY, ::CUDA_RESOURCE_DESC::res::array::hArray - * must be set to a valid CUDA array handle. - * - * \par - * If ::CUDA_RESOURCE_DESC::resType is set to ::CU_RESOURCE_TYPE_MIPMAPPED_ARRAY, ::CUDA_RESOURCE_DESC::res::mipmap::hMipmappedArray - * must be set to a valid CUDA mipmapped array handle. - * - * \par - * If ::CUDA_RESOURCE_DESC::resType is set to ::CU_RESOURCE_TYPE_LINEAR, ::CUDA_RESOURCE_DESC::res::linear::devPtr - * must be set to a valid device pointer, that is aligned to ::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT. - * ::CUDA_RESOURCE_DESC::res::linear::format and ::CUDA_RESOURCE_DESC::res::linear::numChannels - * describe the format of each component and the number of components per array element. ::CUDA_RESOURCE_DESC::res::linear::sizeInBytes - * specifies the size of the array in bytes. The total number of elements in the linear address range cannot exceed - * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH. The number of elements is computed as (sizeInBytes / (sizeof(format) * numChannels)). - * - * \par - * If ::CUDA_RESOURCE_DESC::resType is set to ::CU_RESOURCE_TYPE_PITCH2D, ::CUDA_RESOURCE_DESC::res::pitch2D::devPtr - * must be set to a valid device pointer, that is aligned to ::CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT. - * ::CUDA_RESOURCE_DESC::res::pitch2D::format and ::CUDA_RESOURCE_DESC::res::pitch2D::numChannels - * describe the format of each component and the number of components per array element. ::CUDA_RESOURCE_DESC::res::pitch2D::width - * and ::CUDA_RESOURCE_DESC::res::pitch2D::height specify the width and height of the array in elements, and cannot exceed - * ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH and ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT respectively. - * ::CUDA_RESOURCE_DESC::res::pitch2D::pitchInBytes specifies the pitch between two rows in bytes and has to be aligned to - * ::CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT. Pitch cannot exceed ::CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH. - * - * - ::flags must be set to zero. - * - * - * The ::CUDA_TEXTURE_DESC struct is defined as - * \code - typedef struct CUDA_TEXTURE_DESC_st { - CUaddress_mode addressMode[3]; - CUfilter_mode filterMode; - unsigned int flags; - unsigned int maxAnisotropy; - CUfilter_mode mipmapFilterMode; - float mipmapLevelBias; - float minMipmapLevelClamp; - float maxMipmapLevelClamp; - } CUDA_TEXTURE_DESC; - * \endcode - * where - * - ::CUDA_TEXTURE_DESC::addressMode specifies the addressing mode for each dimension of the texture data. ::CUaddress_mode is defined as: - * \code - typedef enum CUaddress_mode_enum { - CU_TR_ADDRESS_MODE_WRAP = 0, - CU_TR_ADDRESS_MODE_CLAMP = 1, - CU_TR_ADDRESS_MODE_MIRROR = 2, - CU_TR_ADDRESS_MODE_BORDER = 3 - } CUaddress_mode; - * \endcode - * This is ignored if ::CUDA_RESOURCE_DESC::resType is ::CU_RESOURCE_TYPE_LINEAR. Also, if the flag, ::CU_TRSF_NORMALIZED_COORDINATES - * is not set, the only supported address mode is ::CU_TR_ADDRESS_MODE_CLAMP. - * - * - ::CUDA_TEXTURE_DESC::filterMode specifies the filtering mode to be used when fetching from the texture. 
CUfilter_mode is defined as: - * \code - typedef enum CUfilter_mode_enum { - CU_TR_FILTER_MODE_POINT = 0, - CU_TR_FILTER_MODE_LINEAR = 1 - } CUfilter_mode; - * \endcode - * This is ignored if ::CUDA_RESOURCE_DESC::resType is ::CU_RESOURCE_TYPE_LINEAR. - * - * - ::CUDA_TEXTURE_DESC::flags can be any combination of the following: - * - ::CU_TRSF_READ_AS_INTEGER, which suppresses the default behavior of - * having the texture promote integer data to floating point data in the - * range [0, 1]. Note that texture with 32-bit integer format would not be - * promoted, regardless of whether or not this flag is specified. - * - ::CU_TRSF_NORMALIZED_COORDINATES, which suppresses the default behavior - * of having the texture coordinates range from [0, Dim) where Dim is the - * width or height of the CUDA array. Instead, the texture coordinates - * [0, 1.0) reference the entire breadth of the array dimension; Note that - * for CUDA mipmapped arrays, this flag has to be set. - * - ::CU_TRSF_DISABLE_TRILINEAR_OPTIMIZATION, which disables any trilinear - * filtering optimizations. Trilinear optimizations improve texture filtering - * performance by allowing bilinear filtering on textures in scenarios where - * it can closely approximate the expected results. - * - * - ::CUDA_TEXTURE_DESC::maxAnisotropy specifies the maximum anisotropy ratio to be used when doing anisotropic filtering. This value will be - * clamped to the range [1,16]. - * - * - ::CUDA_TEXTURE_DESC::mipmapFilterMode specifies the filter mode when the calculated mipmap level lies between two defined mipmap levels. - * - * - ::CUDA_TEXTURE_DESC::mipmapLevelBias specifies the offset to be applied to the calculated mipmap level. - * - * - ::CUDA_TEXTURE_DESC::minMipmapLevelClamp specifies the lower end of the mipmap level range to clamp access to. - * - * - ::CUDA_TEXTURE_DESC::maxMipmapLevelClamp specifies the upper end of the mipmap level range to clamp access to. - * - * - * The ::CUDA_RESOURCE_VIEW_DESC struct is defined as - * \code - typedef struct CUDA_RESOURCE_VIEW_DESC_st - { - CUresourceViewFormat format; - size_t width; - size_t height; - size_t depth; - unsigned int firstMipmapLevel; - unsigned int lastMipmapLevel; - unsigned int firstLayer; - unsigned int lastLayer; - } CUDA_RESOURCE_VIEW_DESC; - * \endcode - * where: - * - ::CUDA_RESOURCE_VIEW_DESC::format specifies how the data contained in the CUDA array or CUDA mipmapped array should - * be interpreted. Note that this can incur a change in size of the texture data. If the resource view format is a block - * compressed format, then the underlying CUDA array or CUDA mipmapped array has to have a base of format ::CU_AD_FORMAT_UNSIGNED_INT32. - * with 2 or 4 channels, depending on the block compressed format. For ex., BC1 and BC4 require the underlying CUDA array to have - * a format of ::CU_AD_FORMAT_UNSIGNED_INT32 with 2 channels. The other BC formats require the underlying resource to have the same base - * format but with 4 channels. - * - * - ::CUDA_RESOURCE_VIEW_DESC::width specifies the new width of the texture data. If the resource view format is a block - * compressed format, this value has to be 4 times the original width of the resource. For non block compressed formats, - * this value has to be equal to that of the original resource. - * - * - ::CUDA_RESOURCE_VIEW_DESC::height specifies the new height of the texture data. If the resource view format is a block - * compressed format, this value has to be 4 times the original height of the resource. 
For non block compressed formats, - * this value has to be equal to that of the original resource. - * - * - ::CUDA_RESOURCE_VIEW_DESC::depth specifies the new depth of the texture data. This value has to be equal to that of the - * original resource. - * - * - ::CUDA_RESOURCE_VIEW_DESC::firstMipmapLevel specifies the most detailed mipmap level. This will be the new mipmap level zero. - * For non-mipmapped resources, this value has to be zero.::CUDA_TEXTURE_DESC::minMipmapLevelClamp and ::CUDA_TEXTURE_DESC::maxMipmapLevelClamp - * will be relative to this value. For ex., if the firstMipmapLevel is set to 2, and a minMipmapLevelClamp of 1.2 is specified, - * then the actual minimum mipmap level clamp will be 3.2. - * - * - ::CUDA_RESOURCE_VIEW_DESC::lastMipmapLevel specifies the least detailed mipmap level. For non-mipmapped resources, this value - * has to be zero. - * - * - ::CUDA_RESOURCE_VIEW_DESC::firstLayer specifies the first layer index for layered textures. This will be the new layer zero. - * For non-layered resources, this value has to be zero. - * - * - ::CUDA_RESOURCE_VIEW_DESC::lastLayer specifies the last layer index for layered textures. For non-layered resources, - * this value has to be zero. - * - * - * \param pTexObject - Texture object to create - * \param pResDesc - Resource descriptor - * \param pTexDesc - Texture descriptor - * \param pResViewDesc - Resource view descriptor - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa - * ::cuTexObjectDestroy, - * ::cudaCreateTextureObject - */ -CUresult CUDAAPI cuTexObjectCreate(CUtexObject *pTexObject, const CUDA_RESOURCE_DESC *pResDesc, const CUDA_TEXTURE_DESC *pTexDesc, const CUDA_RESOURCE_VIEW_DESC *pResViewDesc); - -/** - * \brief Destroys a texture object - * - * Destroys the texture object specified by \p texObject. - * - * \param texObject - Texture object to destroy - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa - * ::cuTexObjectCreate, - * ::cudaDestroyTextureObject - */ -CUresult CUDAAPI cuTexObjectDestroy(CUtexObject texObject); - -/** - * \brief Returns a texture object's resource descriptor - * - * Returns the resource descriptor for the texture object specified by \p texObject. - * - * \param pResDesc - Resource descriptor - * \param texObject - Texture object - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa - * ::cuTexObjectCreate, - * ::cudaGetTextureObjectResourceDesc, - */ -CUresult CUDAAPI cuTexObjectGetResourceDesc(CUDA_RESOURCE_DESC *pResDesc, CUtexObject texObject); - -/** - * \brief Returns a texture object's texture descriptor - * - * Returns the texture descriptor for the texture object specified by \p texObject. 
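As a concrete illustration of the descriptors above, this sketch builds a texture object over a linear device buffer of `n` floats; `dptr` is assumed to come from cuMemAlloc, and no resource view descriptor is needed for linear memory:

#include <string.h>
#include <cuda.h>

/* Sketch only: 1D texture object over linear device memory. */
static CUresult make_linear_tex(CUtexObject *texOut, CUdeviceptr dptr, size_t n)
{
    CUDA_RESOURCE_DESC resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = CU_RESOURCE_TYPE_LINEAR;
    resDesc.res.linear.devPtr = dptr;            /* must meet TEXTURE_ALIGNMENT */
    resDesc.res.linear.format = CU_AD_FORMAT_FLOAT;
    resDesc.res.linear.numChannels = 1;
    resDesc.res.linear.sizeInBytes = n * sizeof(float);
    resDesc.flags = 0;                           /* must be zero */

    CUDA_TEXTURE_DESC texDesc;
    memset(&texDesc, 0, sizeof(texDesc));        /* address/filter modes are
                                                    ignored for LINEAR resources */
    return cuTexObjectCreate(texOut, &resDesc, &texDesc, NULL);
}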
- * - * \param pTexDesc - Texture descriptor - * \param texObject - Texture object - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa - * ::cuTexObjectCreate, - * ::cudaGetTextureObjectTextureDesc - */ -CUresult CUDAAPI cuTexObjectGetTextureDesc(CUDA_TEXTURE_DESC *pTexDesc, CUtexObject texObject); - -/** - * \brief Returns a texture object's resource view descriptor - * - * Returns the resource view descriptor for the texture object specified by \p texObject. - * If no resource view was set for \p texObject, the ::CUDA_ERROR_INVALID_VALUE is returned. - * - * \param pResViewDesc - Resource view descriptor - * \param texObject - Texture object - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa - * ::cuTexObjectCreate, - * ::cudaGetTextureObjectResourceViewDesc - */ -CUresult CUDAAPI cuTexObjectGetResourceViewDesc(CUDA_RESOURCE_VIEW_DESC *pResViewDesc, CUtexObject texObject); - -/** @} */ /* END CUDA_TEXOBJECT */ - -/** - * \defgroup CUDA_SURFOBJECT Surface Object Management - * - * ___MANBRIEF___ surface object management functions of the low-level CUDA - * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the surface object management functions of the - * low-level CUDA driver application programming interface. The surface - * object API is only supported on devices of compute capability 3.0 or higher. - * - * @{ - */ - -/** - * \brief Creates a surface object - * - * Creates a surface object and returns it in \p pSurfObject. \p pResDesc describes - * the data to perform surface load/stores on. ::CUDA_RESOURCE_DESC::resType must be - * ::CU_RESOURCE_TYPE_ARRAY and ::CUDA_RESOURCE_DESC::res::array::hArray - * must be set to a valid CUDA array handle. ::CUDA_RESOURCE_DESC::flags must be set to zero. - * - * Surface objects are only supported on devices of compute capability 3.0 or higher. - * Additionally, a surface object is an opaque value, and, as such, should only be - * accessed through CUDA API calls. - * - * \param pSurfObject - Surface object to create - * \param pResDesc - Resource descriptor - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa - * ::cuSurfObjectDestroy, - * ::cudaCreateSurfaceObject - */ -CUresult CUDAAPI cuSurfObjectCreate(CUsurfObject *pSurfObject, const CUDA_RESOURCE_DESC *pResDesc); - -/** - * \brief Destroys a surface object - * - * Destroys the surface object specified by \p surfObject. - * - * \param surfObject - Surface object to destroy - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa - * ::cuSurfObjectCreate, - * ::cudaDestroySurfaceObject - */ -CUresult CUDAAPI cuSurfObjectDestroy(CUsurfObject surfObject); - -/** - * \brief Returns a surface object's resource descriptor - * - * Returns the resource descriptor for the surface object specified by \p surfObject. 
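The analogous surface-object sketch, assuming `hArray` was created with the ::CUDA_ARRAY3D_SURFACE_LDST flag:

#include <string.h>
#include <cuda.h>

/* Sketch only: wrap a surface-load/store capable CUDA array. */
static CUresult make_surface(CUsurfObject *surfOut, CUarray hArray)
{
    CUDA_RESOURCE_DESC resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = CU_RESOURCE_TYPE_ARRAY;    /* only ARRAY is valid here */
    resDesc.res.array.hArray = hArray;
    resDesc.flags = 0;                           /* must be zero */
    return cuSurfObjectCreate(surfOut, &resDesc);
}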
- * - * \param pResDesc - Resource descriptor - * \param surfObject - Surface object - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE - * - * \sa - * ::cuSurfObjectCreate, - * ::cudaGetSurfaceObjectResourceDesc - */ -CUresult CUDAAPI cuSurfObjectGetResourceDesc(CUDA_RESOURCE_DESC *pResDesc, CUsurfObject surfObject); - -/** @} */ /* END CUDA_SURFOBJECT */ - -/** - * \defgroup CUDA_PEER_ACCESS Peer Context Memory Access - * - * ___MANBRIEF___ direct peer context memory access functions of the low-level - * CUDA driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the direct peer context memory access functions - * of the low-level CUDA driver application programming interface. - * - * @{ - */ - -/** - * \brief Queries if a device may directly access a peer device's memory. - * - * Returns in \p *canAccessPeer a value of 1 if contexts on \p dev are capable of - * directly accessing memory from contexts on \p peerDev and 0 otherwise. - * If direct access of \p peerDev from \p dev is possible, then access may be - * enabled on two specific contexts by calling ::cuCtxEnablePeerAccess(). - * - * \param canAccessPeer - Returned access capability - * \param dev - Device from which allocations on \p peerDev are to - * be directly accessed. - * \param peerDev - Device on which the allocations to be directly accessed - * by \p dev reside. - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_DEVICE - * \notefnerr - * - * \sa - * ::cuCtxEnablePeerAccess, - * ::cuCtxDisablePeerAccess, - * ::cudaDeviceCanAccessPeer - */ -CUresult CUDAAPI cuDeviceCanAccessPeer(int *canAccessPeer, CUdevice dev, CUdevice peerDev); - -/** - * \brief Enables direct access to memory allocations in a peer context. - * - * If both the current context and \p peerContext are on devices which support unified - * addressing (as may be queried using ::CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING) and same - * major compute capability, then on success all allocations from \p peerContext will - * immediately be accessible by the current context. See \ref CUDA_UNIFIED for additional - * details. - * - * Note that access granted by this call is unidirectional and that in order to access - * memory from the current context in \p peerContext, a separate symmetric call - * to ::cuCtxEnablePeerAccess() is required. - * - * Note that there are both device-wide and system-wide limitations per system - * configuration, as noted in the CUDA Programming Guide under the section - * "Peer-to-Peer Memory Access". - * - * Returns ::CUDA_ERROR_PEER_ACCESS_UNSUPPORTED if ::cuDeviceCanAccessPeer() indicates - * that the ::CUdevice of the current context cannot directly access memory - * from the ::CUdevice of \p peerContext. - * - * Returns ::CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED if direct access of - * \p peerContext from the current context has already been enabled. - * - * Returns ::CUDA_ERROR_TOO_MANY_PEERS if direct peer access is not possible - * because hardware resources required for peer access have been exhausted. - * - * Returns ::CUDA_ERROR_INVALID_CONTEXT if there is no current context, \p peerContext - * is not a valid context, or if the current context is \p peerContext. - * - * Returns ::CUDA_ERROR_INVALID_VALUE if \p Flags is not 0. 
- * - * \param peerContext - Peer context to enable direct access to from the current context - * \param Flags - Reserved for future use and must be set to 0 - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED, - * ::CUDA_ERROR_TOO_MANY_PEERS, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_PEER_ACCESS_UNSUPPORTED, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa - * ::cuDeviceCanAccessPeer, - * ::cuCtxDisablePeerAccess, - * ::cudaDeviceEnablePeerAccess - */ -CUresult CUDAAPI cuCtxEnablePeerAccess(CUcontext peerContext, unsigned int Flags); - -/** - * \brief Disables direct access to memory allocations in a peer context and - * unregisters any registered allocations. - * - Returns ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED if direct peer access has - * not yet been enabled from \p peerContext to the current context. - * - * Returns ::CUDA_ERROR_INVALID_CONTEXT if there is no current context, or if - * \p peerContext is not a valid context. - * - * \param peerContext - Peer context to disable direct access to - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_PEER_ACCESS_NOT_ENABLED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * \notefnerr - * - * \sa - * ::cuDeviceCanAccessPeer, - * ::cuCtxEnablePeerAccess, - * ::cudaDeviceDisablePeerAccess - */ -CUresult CUDAAPI cuCtxDisablePeerAccess(CUcontext peerContext); - -/** - * \brief Queries attributes of the link between two devices. - * - * Returns in \p *value the value of the requested attribute \p attrib of the - * link between \p srcDevice and \p dstDevice. The supported attributes are: - * - ::CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK: A relative value indicating the - * performance of the link between two devices. - * - ::CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED P2P: 1 if P2P Access is enable. - * - ::CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED: 1 if Atomic operations over - * the link are supported. - * - ::CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED: 1 if cudaArray can - * be accessed over the link. - * - * Returns ::CUDA_ERROR_INVALID_DEVICE if \p srcDevice or \p dstDevice are not valid - * or if they represent the same device. - * - * Returns ::CUDA_ERROR_INVALID_VALUE if \p attrib is not valid or if \p value is - * a null pointer. - * - * \param value - Returned value of the requested attribute - * \param attrib - The requested attribute of the link between \p srcDevice and \p dstDevice. - * \param srcDevice - The source device of the target link. - * \param dstDevice - The destination device of the target link. - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_DEVICE, - * ::CUDA_ERROR_INVALID_VALUE - * \notefnerr - * - * \sa - * ::cuCtxEnablePeerAccess, - * ::cuCtxDisablePeerAccess, - * ::cuDeviceCanAccessPeer, - * ::cudaDeviceGetP2PAttribute - */ -CUresult CUDAAPI cuDeviceGetP2PAttribute(int* value, CUdevice_P2PAttribute attrib, CUdevice srcDevice, CUdevice dstDevice); - -/** @} */ /* END CUDA_PEER_ACCESS */ - -/** - * \defgroup CUDA_GRAPHICS Graphics Interoperability - * - * ___MANBRIEF___ graphics interoperability functions of the low-level CUDA - * driver API (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the graphics interoperability functions of the - * low-level CUDA driver application programming interface. 
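A short sketch of the one-directional peer-access handshake described above; `devA` is assumed to be the device backing the current context and `peerCtx` a context created on `devB`:

#include <cuda.h>

/* Sketch only: enable access from the current context to peerCtx. */
static CUresult enable_peer(CUdevice devA, CUdevice devB, CUcontext peerCtx)
{
    int canAccess = 0;
    CUresult err = cuDeviceCanAccessPeer(&canAccess, devA, devB);
    if (err != CUDA_SUCCESS)
        return err;
    if (!canAccess)
        return CUDA_ERROR_PEER_ACCESS_UNSUPPORTED;
    err = cuCtxEnablePeerAccess(peerCtx, 0);     /* Flags must be 0 */
    if (err == CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED)
        err = CUDA_SUCCESS;                      /* benign: already enabled */
    return err;
}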
- * - * @{ - */ - -/** - * \brief Unregisters a graphics resource for access by CUDA - * - * Unregisters the graphics resource \p resource so it is not accessible by - * CUDA unless registered again. - * - * If \p resource is invalid then ::CUDA_ERROR_INVALID_HANDLE is - * returned. - * - * \param resource - Resource to unregister - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_UNKNOWN - * \notefnerr - * - * \sa - * ::cuGraphicsD3D9RegisterResource, - * ::cuGraphicsD3D10RegisterResource, - * ::cuGraphicsD3D11RegisterResource, - * ::cuGraphicsGLRegisterBuffer, - * ::cuGraphicsGLRegisterImage, - * ::cudaGraphicsUnregisterResource - */ -CUresult CUDAAPI cuGraphicsUnregisterResource(CUgraphicsResource resource); - -/** - * \brief Get an array through which to access a subresource of a mapped graphics resource. - * - * Returns in \p *pArray an array through which the subresource of the mapped - * graphics resource \p resource which corresponds to array index \p arrayIndex - * and mipmap level \p mipLevel may be accessed. The value set in \p *pArray may - * change every time that \p resource is mapped. - * - * If \p resource is not a texture then it cannot be accessed via an array and - * ::CUDA_ERROR_NOT_MAPPED_AS_ARRAY is returned. - * If \p arrayIndex is not a valid array index for \p resource then - * ::CUDA_ERROR_INVALID_VALUE is returned. - * If \p mipLevel is not a valid mipmap level for \p resource then - * ::CUDA_ERROR_INVALID_VALUE is returned. - * If \p resource is not mapped then ::CUDA_ERROR_NOT_MAPPED is returned. - * - * \param pArray - Returned array through which a subresource of \p resource may be accessed - * \param resource - Mapped resource to access - * \param arrayIndex - Array index for array textures or cubemap face - * index as defined by ::CUarray_cubemap_face for - * cubemap textures for the subresource to access - * \param mipLevel - Mipmap level for the subresource to access - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_NOT_MAPPED, - * ::CUDA_ERROR_NOT_MAPPED_AS_ARRAY - * \notefnerr - * - * \sa - * ::cuGraphicsResourceGetMappedPointer, - * ::cudaGraphicsSubResourceGetMappedArray - */ -CUresult CUDAAPI cuGraphicsSubResourceGetMappedArray(CUarray *pArray, CUgraphicsResource resource, unsigned int arrayIndex, unsigned int mipLevel); - -/** - * \brief Get a mipmapped array through which to access a mapped graphics resource. - * - * Returns in \p *pMipmappedArray a mipmapped array through which the mapped graphics - * resource \p resource. The value set in \p *pMipmappedArray may change every time - * that \p resource is mapped. - * - * If \p resource is not a texture then it cannot be accessed via a mipmapped array and - * ::CUDA_ERROR_NOT_MAPPED_AS_ARRAY is returned. - * If \p resource is not mapped then ::CUDA_ERROR_NOT_MAPPED is returned. 
- * - * \param pMipmappedArray - Returned mipmapped array through which \p resource may be accessed - * \param resource - Mapped resource to access - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_NOT_MAPPED, - * ::CUDA_ERROR_NOT_MAPPED_AS_ARRAY - * \notefnerr - * - * \sa - * ::cuGraphicsResourceGetMappedPointer, - * ::cudaGraphicsResourceGetMappedMipmappedArray - */ -CUresult CUDAAPI cuGraphicsResourceGetMappedMipmappedArray(CUmipmappedArray *pMipmappedArray, CUgraphicsResource resource); - -/** - * \brief Get a device pointer through which to access a mapped graphics resource. - * - * Returns in \p *pDevPtr a pointer through which the mapped graphics resource - * \p resource may be accessed. - * Returns in \p pSize the size of the memory in bytes which may be accessed from that pointer. - * The value set in \p pPointer may change every time that \p resource is mapped. - * - * If \p resource is not a buffer then it cannot be accessed via a pointer and - * ::CUDA_ERROR_NOT_MAPPED_AS_POINTER is returned. - * If \p resource is not mapped then ::CUDA_ERROR_NOT_MAPPED is returned. - * * - * \param pDevPtr - Returned pointer through which \p resource may be accessed - * \param pSize - Returned size of the buffer accessible starting at \p *pPointer - * \param resource - Mapped resource to access - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_NOT_MAPPED, - * ::CUDA_ERROR_NOT_MAPPED_AS_POINTER - * \notefnerr - * - * \sa - * ::cuGraphicsMapResources, - * ::cuGraphicsSubResourceGetMappedArray, - * ::cudaGraphicsResourceGetMappedPointer - */ -CUresult CUDAAPI cuGraphicsResourceGetMappedPointer(CUdeviceptr *pDevPtr, size_t *pSize, CUgraphicsResource resource); - -/** - * \brief Set usage flags for mapping a graphics resource - * - * Set \p flags for mapping the graphics resource \p resource. - * - * Changes to \p flags will take effect the next time \p resource is mapped. - * The \p flags argument may be any of the following: - - * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE: Specifies no hints about how this - * resource will be used. It is therefore assumed that this resource will be - * read from and written to by CUDA kernels. This is the default value. - * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_READONLY: Specifies that CUDA kernels which - * access this resource will not write to this resource. - * - ::CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITEDISCARD: Specifies that CUDA kernels - * which access this resource will not read from this resource and will - * write over the entire contents of the resource, so none of the data - * previously stored in the resource will be preserved. - * - * If \p resource is presently mapped for access by CUDA then - * ::CUDA_ERROR_ALREADY_MAPPED is returned. - * If \p flags is not one of the above values then ::CUDA_ERROR_INVALID_VALUE is returned. 
- * - * \param resource - Registered resource to set flags for - * \param flags - Parameters for resource mapping - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_ALREADY_MAPPED - * \notefnerr - * - * \sa - * ::cuGraphicsMapResources, - * ::cudaGraphicsResourceSetMapFlags - */ -CUresult CUDAAPI cuGraphicsResourceSetMapFlags(CUgraphicsResource resource, unsigned int flags); - -/** - * \brief Map graphics resources for access by CUDA - * - * Maps the \p count graphics resources in \p resources for access by CUDA. - * - * The resources in \p resources may be accessed by CUDA until they - * are unmapped. The graphics API from which \p resources were registered - * should not access any resources while they are mapped by CUDA. If an - * application does so, the results are undefined. - * - * This function provides the synchronization guarantee that any graphics calls - * issued before ::cuGraphicsMapResources() will complete before any subsequent CUDA - * work issued in \p stream begins. - * - * If \p resources includes any duplicate entries then ::CUDA_ERROR_INVALID_HANDLE is returned. - * If any of \p resources are presently mapped for access by CUDA then ::CUDA_ERROR_ALREADY_MAPPED is returned. - * - * \param count - Number of resources to map - * \param resources - Resources to map for CUDA usage - * \param hStream - Stream with which to synchronize - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_ALREADY_MAPPED, - * ::CUDA_ERROR_UNKNOWN - * \note_null_stream - * \notefnerr - * - * \sa - * ::cuGraphicsResourceGetMappedPointer, - * ::cuGraphicsSubResourceGetMappedArray, - * ::cuGraphicsUnmapResources, - * ::cudaGraphicsMapResources - */ -CUresult CUDAAPI cuGraphicsMapResources(unsigned int count, CUgraphicsResource *resources, CUstream hStream); - -/** - * \brief Unmap graphics resources. - * - * Unmaps the \p count graphics resources in \p resources. - * - * Once unmapped, the resources in \p resources may not be accessed by CUDA - * until they are mapped again. - * - * This function provides the synchronization guarantee that any CUDA work issued - * in \p stream before ::cuGraphicsUnmapResources() will complete before any - * subsequently issued graphics work begins. - * - * - * If \p resources includes any duplicate entries then ::CUDA_ERROR_INVALID_HANDLE is returned. - * If any of \p resources are not presently mapped for access by CUDA then ::CUDA_ERROR_NOT_MAPPED is returned. 
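/*
 * Illustrative sketch of the map/access/unmap sequence documented above,
 * using only the entry points declared in this section. It assumes a
 * resource `res` that was previously registered (e.g. via
 * cuGraphicsGLRegisterBuffer) and a stream `hStream`; error handling is
 * collapsed into a single check per call.
 */
static CUresult readInteropBuffer(CUgraphicsResource res, CUstream hStream)
{
    CUdeviceptr dptr;
    size_t size;
    CUresult err;

    /* Hint that CUDA kernels will only read from the resource; this must be
     * set while the resource is unmapped. */
    err = cuGraphicsResourceSetMapFlags(res, CU_GRAPHICS_MAP_RESOURCE_FLAGS_READONLY);
    if (err != CUDA_SUCCESS) return err;

    /* Make the resource visible to CUDA; the owning graphics API must not
     * touch it until it is unmapped again. */
    err = cuGraphicsMapResources(1, &res, hStream);
    if (err != CUDA_SUCCESS) return err;

    /* Buffers are accessed through a device pointer; textures would use
     * cuGraphicsSubResourceGetMappedArray instead. */
    err = cuGraphicsResourceGetMappedPointer(&dptr, &size, res);

    /* ... launch kernels consuming `dptr`/`size` on `hStream` ... */

    /* Return the resource to the graphics API. */
    cuGraphicsUnmapResources(1, &res, hStream);
    return err;
}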
- * - * \param count - Number of resources to unmap - * \param resources - Resources to unmap - * \param hStream - Stream with which to synchronize - * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_DEINITIALIZED, - * ::CUDA_ERROR_NOT_INITIALIZED, - * ::CUDA_ERROR_INVALID_CONTEXT, - * ::CUDA_ERROR_INVALID_HANDLE, - * ::CUDA_ERROR_NOT_MAPPED, - * ::CUDA_ERROR_UNKNOWN - * \note_null_stream - * \notefnerr - * - * \sa - * ::cuGraphicsMapResources, - * ::cudaGraphicsUnmapResources - */ -CUresult CUDAAPI cuGraphicsUnmapResources(unsigned int count, CUgraphicsResource *resources, CUstream hStream); - -/** @} */ /* END CUDA_GRAPHICS */ - -/** - * \defgroup CUDA_DRIVER_ENTRY_POINT Driver Entry Point Access - * - * ___MANBRIEF___ driver entry point access functions of the low-level CUDA driver API - * (___CURRENT_FILE___) ___ENDMANBRIEF___ - * - * This section describes the driver entry point access functions of the low-level CUDA - * driver application programming interface. - * - * @{ - */ - -/** - * \brief Returns the requested driver API function pointer - * - * Returns in \p **pfn the address of the CUDA driver function for the requested - * CUDA version and flags. - * - * The CUDA version is specified as (1000 * major + 10 * minor), so CUDA 11.2 - * should be specified as 11020. For a requested driver symbol, if the specified - * CUDA version is greater than or equal to the CUDA version in which the driver symbol - * was introduced, this API will return the function pointer to the corresponding - * versioned function. - * - * The pointer returned by the API should be cast to a function pointer matching the - * requested driver function's definition in the API header file. The function pointer - * typedef can be picked up from the corresponding typedefs header file. For example, - * cudaTypedefs.h consists of function pointer typedefs for driver APIs defined in cuda.h. - * - * The API will return ::CUDA_ERROR_NOT_FOUND if the requested driver function is not - * supported on the platform, no ABI compatible driver function exists for the specified - * \p cudaVersion or if the driver symbol is invalid. - * - * The requested flags can be: - * - ::CU_GET_PROC_ADDRESS_DEFAULT: This is the default mode. This is equivalent to - * ::CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM if the code is compiled with - * --default-stream per-thread compilation flag or the macro CUDA_API_PER_THREAD_DEFAULT_STREAM - * is defined; ::CU_GET_PROC_ADDRESS_LEGACY_STREAM otherwise. - * - ::CU_GET_PROC_ADDRESS_LEGACY_STREAM: This will enable the search for all driver symbols - * that match the requested driver symbol name except the corresponding per-thread versions. - * - ::CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM: This will enable the search for all - * driver symbols that match the requested driver symbol name including the per-thread - * versions. If a per-thread version is not found, the API will return the legacy version - * of the driver function. - * - * \param symbol - The base name of the driver API function to look for. As an example, - * for the driver API ::cuMemAlloc_v2, \p symbol would be cuMemAlloc and - * \p cudaVersion would be the ABI compatible CUDA version for the _v2 variant. - * \param pfn - Location to return the function pointer to the requested driver function - * \param cudaVersion - The CUDA version to look for the requested driver symbol - * \param flags - Flags to specify search options. 
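/*
 * Illustrative sketch of the entry-point lookup described above: the CUDA
 * version is encoded as 1000*major + 10*minor, so CUDA 11.2 is 11020.
 * Real code should take the function-pointer typedef from cudaTypedefs.h,
 * as the documentation suggests; the local typedef below merely mirrors
 * the expected cuMemAlloc_v2 signature and is an assumption of this sketch.
 */
typedef CUresult (CUDAAPI *cuMemAlloc_pfn)(CUdeviceptr *dptr, size_t bytesize);

static CUresult allocViaProcAddress(CUdeviceptr *dptr, size_t bytes)
{
    void *fn = NULL;
    CUresult err = cuGetProcAddress("cuMemAlloc", &fn, 11020,
                                    CU_GET_PROC_ADDRESS_DEFAULT);
    if (err != CUDA_SUCCESS)
        return err; /* e.g. CUDA_ERROR_NOT_FOUND if no ABI-compatible symbol exists */
    return ((cuMemAlloc_pfn)fn)(dptr, bytes);
}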
- * - * \return - * ::CUDA_SUCCESS, - * ::CUDA_ERROR_INVALID_VALUE, - * ::CUDA_ERROR_NOT_SUPPORTED, - * ::CUDA_ERROR_NOT_FOUND - * \note_version_mixing - * - * \sa - * ::cudaGetDriverEntryPoint - */ -CUresult CUDAAPI cuGetProcAddress(const char *symbol, void **pfn, int cudaVersion, cuuint64_t flags); - -/** @} */ /* END CUDA_DRIVER_ENTRY_POINT */ - -CUresult CUDAAPI cuGetExportTable(const void **ppExportTable, const CUuuid *pExportTableId); - -/** - * CUDA API versioning support - */ -#if defined(__CUDA_API_VERSION_INTERNAL) - #undef cuMemHostRegister - #undef cuGraphicsResourceSetMapFlags - #undef cuLinkCreate - #undef cuLinkAddData - #undef cuLinkAddFile - #undef cuDeviceTotalMem - #undef cuCtxCreate - #undef cuModuleGetGlobal - #undef cuMemGetInfo - #undef cuMemAlloc - #undef cuMemAllocPitch - #undef cuMemFree - #undef cuMemGetAddressRange - #undef cuMemAllocHost - #undef cuMemHostGetDevicePointer - #undef cuMemcpyHtoD - #undef cuMemcpyDtoH - #undef cuMemcpyDtoD - #undef cuMemcpyDtoA - #undef cuMemcpyAtoD - #undef cuMemcpyHtoA - #undef cuMemcpyAtoH - #undef cuMemcpyAtoA - #undef cuMemcpyHtoAAsync - #undef cuMemcpyAtoHAsync - #undef cuMemcpy2D - #undef cuMemcpy2DUnaligned - #undef cuMemcpy3D - #undef cuMemcpyHtoDAsync - #undef cuMemcpyDtoHAsync - #undef cuMemcpyDtoDAsync - #undef cuMemcpy2DAsync - #undef cuMemcpy3DAsync - #undef cuMemsetD8 - #undef cuMemsetD16 - #undef cuMemsetD32 - #undef cuMemsetD2D8 - #undef cuMemsetD2D16 - #undef cuMemsetD2D32 - #undef cuArrayCreate - #undef cuArrayGetDescriptor - #undef cuArray3DCreate - #undef cuArray3DGetDescriptor - #undef cuTexRefSetAddress - #undef cuTexRefSetAddress2D - #undef cuTexRefGetAddress - #undef cuGraphicsResourceGetMappedPointer - #undef cuCtxDestroy - #undef cuCtxPopCurrent - #undef cuCtxPushCurrent - #undef cuStreamDestroy - #undef cuEventDestroy - #undef cuMemcpy - #undef cuMemcpyAsync - #undef cuMemcpyPeer - #undef cuMemcpyPeerAsync - #undef cuMemcpy3DPeer - #undef cuMemcpy3DPeerAsync - #undef cuMemsetD8Async - #undef cuMemsetD16Async - #undef cuMemsetD32Async - #undef cuMemsetD2D8Async - #undef cuMemsetD2D16Async - #undef cuMemsetD2D32Async - #undef cuStreamGetPriority - #undef cuStreamGetFlags - #undef cuStreamGetCtx - #undef cuStreamWaitEvent - #undef cuStreamAddCallback - #undef cuStreamAttachMemAsync - #undef cuStreamQuery - #undef cuStreamSynchronize - #undef cuEventRecord - #undef cuEventRecordWithFlags - #undef cuLaunchKernel - #undef cuLaunchHostFunc - #undef cuGraphicsMapResources - #undef cuGraphicsUnmapResources - #undef cuStreamWriteValue32 - #undef cuStreamWaitValue32 - #undef cuStreamWriteValue64 - #undef cuStreamWaitValue64 - #undef cuStreamBatchMemOp - #undef cuMemPrefetchAsync - #undef cuLaunchCooperativeKernel - #undef cuSignalExternalSemaphoresAsync - #undef cuWaitExternalSemaphoresAsync - #undef cuStreamBeginCapture - #undef cuStreamEndCapture - #undef cuStreamIsCapturing - #undef cuStreamGetCaptureInfo - #undef cuStreamGetCaptureInfo_v2 - #undef cuGraphUpload - #undef cuGraphLaunch - #undef cuDevicePrimaryCtxRelease - #undef cuDevicePrimaryCtxReset - #undef cuDevicePrimaryCtxSetFlags - #undef cuIpcOpenMemHandle - #undef cuStreamCopyAttributes - #undef cuStreamSetAttribute - #undef cuStreamGetAttribute - #undef cuGraphInstantiate - #undef cuMemMapArrayAsync - #undef cuMemFreeAsync - #undef cuMemAllocAsync - #undef cuMemAllocFromPoolAsync - #undef cuStreamUpdateCaptureDependencies - - CUresult CUDAAPI cuMemHostRegister(void *p, size_t bytesize, unsigned int Flags); - CUresult CUDAAPI 
cuGraphicsResourceSetMapFlags(CUgraphicsResource resource, unsigned int flags); - CUresult CUDAAPI cuLinkCreate(unsigned int numOptions, CUjit_option *options, void **optionValues, CUlinkState *stateOut); - CUresult CUDAAPI cuLinkAddData(CUlinkState state, CUjitInputType type, void *data, size_t size, const char *name, - unsigned int numOptions, CUjit_option *options, void **optionValues); - CUresult CUDAAPI cuLinkAddFile(CUlinkState state, CUjitInputType type, const char *path, - unsigned int numOptions, CUjit_option *options, void **optionValues); - CUresult CUDAAPI cuTexRefSetAddress2D_v2(CUtexref hTexRef, const CUDA_ARRAY_DESCRIPTOR *desc, CUdeviceptr dptr, size_t Pitch); - - typedef unsigned int CUdeviceptr_v1; - - typedef struct CUDA_MEMCPY2D_v1_st - { - unsigned int srcXInBytes; /**< Source X in bytes */ - unsigned int srcY; /**< Source Y */ - CUmemorytype srcMemoryType; /**< Source memory type (host, device, array) */ - const void *srcHost; /**< Source host pointer */ - CUdeviceptr_v1 srcDevice; /**< Source device pointer */ - CUarray srcArray; /**< Source array reference */ - unsigned int srcPitch; /**< Source pitch (ignored when src is array) */ - - unsigned int dstXInBytes; /**< Destination X in bytes */ - unsigned int dstY; /**< Destination Y */ - CUmemorytype dstMemoryType; /**< Destination memory type (host, device, array) */ - void *dstHost; /**< Destination host pointer */ - CUdeviceptr_v1 dstDevice; /**< Destination device pointer */ - CUarray dstArray; /**< Destination array reference */ - unsigned int dstPitch; /**< Destination pitch (ignored when dst is array) */ - - unsigned int WidthInBytes; /**< Width of 2D memory copy in bytes */ - unsigned int Height; /**< Height of 2D memory copy */ - } CUDA_MEMCPY2D_v1; - - typedef struct CUDA_MEMCPY3D_v1_st - { - unsigned int srcXInBytes; /**< Source X in bytes */ - unsigned int srcY; /**< Source Y */ - unsigned int srcZ; /**< Source Z */ - unsigned int srcLOD; /**< Source LOD */ - CUmemorytype srcMemoryType; /**< Source memory type (host, device, array) */ - const void *srcHost; /**< Source host pointer */ - CUdeviceptr_v1 srcDevice; /**< Source device pointer */ - CUarray srcArray; /**< Source array reference */ - void *reserved0; /**< Must be NULL */ - unsigned int srcPitch; /**< Source pitch (ignored when src is array) */ - unsigned int srcHeight; /**< Source height (ignored when src is array; may be 0 if Depth==1) */ - - unsigned int dstXInBytes; /**< Destination X in bytes */ - unsigned int dstY; /**< Destination Y */ - unsigned int dstZ; /**< Destination Z */ - unsigned int dstLOD; /**< Destination LOD */ - CUmemorytype dstMemoryType; /**< Destination memory type (host, device, array) */ - void *dstHost; /**< Destination host pointer */ - CUdeviceptr_v1 dstDevice; /**< Destination device pointer */ - CUarray dstArray; /**< Destination array reference */ - void *reserved1; /**< Must be NULL */ - unsigned int dstPitch; /**< Destination pitch (ignored when dst is array) */ - unsigned int dstHeight; /**< Destination height (ignored when dst is array; may be 0 if Depth==1) */ - - unsigned int WidthInBytes; /**< Width of 3D memory copy in bytes */ - unsigned int Height; /**< Height of 3D memory copy */ - unsigned int Depth; /**< Depth of 3D memory copy */ - } CUDA_MEMCPY3D_v1; - - typedef struct CUDA_ARRAY_DESCRIPTOR_v1_st - { - unsigned int Width; /**< Width of array */ - unsigned int Height; /**< Height of array */ - - CUarray_format Format; /**< Array format */ - unsigned int NumChannels; /**< Channels per array element */ - 
} CUDA_ARRAY_DESCRIPTOR_v1; - - typedef struct CUDA_ARRAY3D_DESCRIPTOR_v1_st - { - unsigned int Width; /**< Width of 3D array */ - unsigned int Height; /**< Height of 3D array */ - unsigned int Depth; /**< Depth of 3D array */ - - CUarray_format Format; /**< Array format */ - unsigned int NumChannels; /**< Channels per array element */ - unsigned int Flags; /**< Flags */ - } CUDA_ARRAY3D_DESCRIPTOR_v1; - - CUresult CUDAAPI cuDeviceTotalMem(unsigned int *bytes, CUdevice dev); - CUresult CUDAAPI cuCtxCreate(CUcontext *pctx, unsigned int flags, CUdevice dev); - CUresult CUDAAPI cuModuleGetGlobal(CUdeviceptr_v1 *dptr, unsigned int *bytes, CUmodule hmod, const char *name); - CUresult CUDAAPI cuMemGetInfo(unsigned int *free, unsigned int *total); - CUresult CUDAAPI cuMemAlloc(CUdeviceptr_v1 *dptr, unsigned int bytesize); - CUresult CUDAAPI cuMemAllocPitch(CUdeviceptr_v1 *dptr, unsigned int *pPitch, unsigned int WidthInBytes, unsigned int Height, unsigned int ElementSizeBytes); - CUresult CUDAAPI cuMemFree(CUdeviceptr_v1 dptr); - CUresult CUDAAPI cuMemGetAddressRange(CUdeviceptr_v1 *pbase, unsigned int *psize, CUdeviceptr_v1 dptr); - CUresult CUDAAPI cuMemAllocHost(void **pp, unsigned int bytesize); - CUresult CUDAAPI cuMemHostGetDevicePointer(CUdeviceptr_v1 *pdptr, void *p, unsigned int Flags); - CUresult CUDAAPI cuMemcpyHtoD(CUdeviceptr_v1 dstDevice, const void *srcHost, unsigned int ByteCount); - CUresult CUDAAPI cuMemcpyDtoH(void *dstHost, CUdeviceptr_v1 srcDevice, unsigned int ByteCount); - CUresult CUDAAPI cuMemcpyDtoD(CUdeviceptr_v1 dstDevice, CUdeviceptr_v1 srcDevice, unsigned int ByteCount); - CUresult CUDAAPI cuMemcpyDtoA(CUarray dstArray, unsigned int dstOffset, CUdeviceptr_v1 srcDevice, unsigned int ByteCount); - CUresult CUDAAPI cuMemcpyAtoD(CUdeviceptr_v1 dstDevice, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount); - CUresult CUDAAPI cuMemcpyHtoA(CUarray dstArray, unsigned int dstOffset, const void *srcHost, unsigned int ByteCount); - CUresult CUDAAPI cuMemcpyAtoH(void *dstHost, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount); - CUresult CUDAAPI cuMemcpyAtoA(CUarray dstArray, unsigned int dstOffset, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount); - CUresult CUDAAPI cuMemcpyHtoAAsync(CUarray dstArray, unsigned int dstOffset, const void *srcHost, unsigned int ByteCount, CUstream hStream); - CUresult CUDAAPI cuMemcpyAtoHAsync(void *dstHost, CUarray srcArray, unsigned int srcOffset, unsigned int ByteCount, CUstream hStream); - CUresult CUDAAPI cuMemcpy2D(const CUDA_MEMCPY2D_v1 *pCopy); - CUresult CUDAAPI cuMemcpy2DUnaligned(const CUDA_MEMCPY2D_v1 *pCopy); - CUresult CUDAAPI cuMemcpy3D(const CUDA_MEMCPY3D_v1 *pCopy); - CUresult CUDAAPI cuMemcpyHtoDAsync(CUdeviceptr_v1 dstDevice, const void *srcHost, unsigned int ByteCount, CUstream hStream); - CUresult CUDAAPI cuMemcpyDtoHAsync(void *dstHost, CUdeviceptr_v1 srcDevice, unsigned int ByteCount, CUstream hStream); - CUresult CUDAAPI cuMemcpyDtoDAsync(CUdeviceptr_v1 dstDevice, CUdeviceptr_v1 srcDevice, unsigned int ByteCount, CUstream hStream); - CUresult CUDAAPI cuMemcpy2DAsync(const CUDA_MEMCPY2D_v1 *pCopy, CUstream hStream); - CUresult CUDAAPI cuMemcpy3DAsync(const CUDA_MEMCPY3D_v1 *pCopy, CUstream hStream); - CUresult CUDAAPI cuMemsetD8(CUdeviceptr_v1 dstDevice, unsigned char uc, unsigned int N); - CUresult CUDAAPI cuMemsetD16(CUdeviceptr_v1 dstDevice, unsigned short us, unsigned int N); - CUresult CUDAAPI cuMemsetD32(CUdeviceptr_v1 dstDevice, unsigned int ui, unsigned int N); - 
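/*
 * Illustrative sketch of filling the legacy CUDA_MEMCPY2D_v1 descriptor
 * defined above for a pitched host-to-device copy. This v1 form exists only
 * for the __CUDA_API_VERSION_INTERNAL compatibility block; new code uses the
 * size_t-based CUDA_MEMCPY2D with cuMemcpy2D_v2. CU_MEMORYTYPE_HOST and
 * CU_MEMORYTYPE_DEVICE are the standard CUmemorytype values defined
 * elsewhere in this header.
 */
static CUresult copy2DHostToDeviceV1(CUdeviceptr_v1 dst, unsigned int dstPitch,
                                     const void *src, unsigned int srcPitch,
                                     unsigned int widthInBytes, unsigned int height)
{
    CUDA_MEMCPY2D_v1 cpy = {0};        /* zero X/Y offsets and unused fields */

    cpy.srcMemoryType = CU_MEMORYTYPE_HOST;
    cpy.srcHost       = src;
    cpy.srcPitch      = srcPitch;

    cpy.dstMemoryType = CU_MEMORYTYPE_DEVICE;
    cpy.dstDevice     = dst;
    cpy.dstPitch      = dstPitch;

    cpy.WidthInBytes  = widthInBytes;
    cpy.Height        = height;

    return cuMemcpy2D(&cpy);           /* v1 overload declared above */
}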
CUresult CUDAAPI cuMemsetD2D8(CUdeviceptr_v1 dstDevice, unsigned int dstPitch, unsigned char uc, unsigned int Width, unsigned int Height); - CUresult CUDAAPI cuMemsetD2D16(CUdeviceptr_v1 dstDevice, unsigned int dstPitch, unsigned short us, unsigned int Width, unsigned int Height); - CUresult CUDAAPI cuMemsetD2D32(CUdeviceptr_v1 dstDevice, unsigned int dstPitch, unsigned int ui, unsigned int Width, unsigned int Height); - CUresult CUDAAPI cuArrayCreate(CUarray *pHandle, const CUDA_ARRAY_DESCRIPTOR_v1 *pAllocateArray); - CUresult CUDAAPI cuArrayGetDescriptor(CUDA_ARRAY_DESCRIPTOR_v1 *pArrayDescriptor, CUarray hArray); - CUresult CUDAAPI cuArray3DCreate(CUarray *pHandle, const CUDA_ARRAY3D_DESCRIPTOR_v1 *pAllocateArray); - CUresult CUDAAPI cuArray3DGetDescriptor(CUDA_ARRAY3D_DESCRIPTOR_v1 *pArrayDescriptor, CUarray hArray); - CUresult CUDAAPI cuTexRefSetAddress(unsigned int *ByteOffset, CUtexref hTexRef, CUdeviceptr_v1 dptr, unsigned int bytes); - CUresult CUDAAPI cuTexRefSetAddress2D(CUtexref hTexRef, const CUDA_ARRAY_DESCRIPTOR_v1 *desc, CUdeviceptr_v1 dptr, unsigned int Pitch); - CUresult CUDAAPI cuTexRefGetAddress(CUdeviceptr_v1 *pdptr, CUtexref hTexRef); - CUresult CUDAAPI cuGraphicsResourceGetMappedPointer(CUdeviceptr_v1 *pDevPtr, unsigned int *pSize, CUgraphicsResource resource); - - CUresult CUDAAPI cuCtxDestroy(CUcontext ctx); - CUresult CUDAAPI cuCtxPopCurrent(CUcontext *pctx); - CUresult CUDAAPI cuCtxPushCurrent(CUcontext ctx); - CUresult CUDAAPI cuStreamDestroy(CUstream hStream); - CUresult CUDAAPI cuEventDestroy(CUevent hEvent); - CUresult CUDAAPI cuDevicePrimaryCtxRelease(CUdevice dev); - CUresult CUDAAPI cuDevicePrimaryCtxReset(CUdevice dev); - CUresult CUDAAPI cuDevicePrimaryCtxSetFlags(CUdevice dev, unsigned int flags); - - CUresult CUDAAPI cuMemcpyHtoD_v2(CUdeviceptr dstDevice, const void *srcHost, size_t ByteCount); - CUresult CUDAAPI cuMemcpyDtoH_v2(void *dstHost, CUdeviceptr srcDevice, size_t ByteCount); - CUresult CUDAAPI cuMemcpyDtoD_v2(CUdeviceptr dstDevice, CUdeviceptr srcDevice, size_t ByteCount); - CUresult CUDAAPI cuMemcpyDtoA_v2(CUarray dstArray, size_t dstOffset, CUdeviceptr srcDevice, size_t ByteCount); - CUresult CUDAAPI cuMemcpyAtoD_v2(CUdeviceptr dstDevice, CUarray srcArray, size_t srcOffset, size_t ByteCount); - CUresult CUDAAPI cuMemcpyHtoA_v2(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount); - CUresult CUDAAPI cuMemcpyAtoH_v2(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount); - CUresult CUDAAPI cuMemcpyAtoA_v2(CUarray dstArray, size_t dstOffset, CUarray srcArray, size_t srcOffset, size_t ByteCount); - CUresult CUDAAPI cuMemcpyHtoAAsync_v2(CUarray dstArray, size_t dstOffset, const void *srcHost, size_t ByteCount, CUstream hStream); - CUresult CUDAAPI cuMemcpyAtoHAsync_v2(void *dstHost, CUarray srcArray, size_t srcOffset, size_t ByteCount, CUstream hStream); - CUresult CUDAAPI cuMemcpy2D_v2(const CUDA_MEMCPY2D *pCopy); - CUresult CUDAAPI cuMemcpy2DUnaligned_v2(const CUDA_MEMCPY2D *pCopy); - CUresult CUDAAPI cuMemcpy3D_v2(const CUDA_MEMCPY3D *pCopy); - CUresult CUDAAPI cuMemcpyHtoDAsync_v2(CUdeviceptr dstDevice, const void *srcHost, size_t ByteCount, CUstream hStream); - CUresult CUDAAPI cuMemcpyDtoHAsync_v2(void *dstHost, CUdeviceptr srcDevice, size_t ByteCount, CUstream hStream); - CUresult CUDAAPI cuMemcpyDtoDAsync_v2(CUdeviceptr dstDevice, CUdeviceptr srcDevice, size_t ByteCount, CUstream hStream); - CUresult CUDAAPI cuMemcpy2DAsync_v2(const CUDA_MEMCPY2D *pCopy, CUstream hStream); - CUresult CUDAAPI 
cuMemcpy3DAsync_v2(const CUDA_MEMCPY3D *pCopy, CUstream hStream); - CUresult CUDAAPI cuMemsetD8_v2(CUdeviceptr dstDevice, unsigned char uc, size_t N); - CUresult CUDAAPI cuMemsetD16_v2(CUdeviceptr dstDevice, unsigned short us, size_t N); - CUresult CUDAAPI cuMemsetD32_v2(CUdeviceptr dstDevice, unsigned int ui, size_t N); - CUresult CUDAAPI cuMemsetD2D8_v2(CUdeviceptr dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height); - CUresult CUDAAPI cuMemsetD2D16_v2(CUdeviceptr dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height); - CUresult CUDAAPI cuMemsetD2D32_v2(CUdeviceptr dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height); - CUresult CUDAAPI cuMemcpy(CUdeviceptr dst, CUdeviceptr src, size_t ByteCount); - CUresult CUDAAPI cuMemcpyAsync(CUdeviceptr dst, CUdeviceptr src, size_t ByteCount, CUstream hStream); - CUresult CUDAAPI cuMemcpyPeer(CUdeviceptr dstDevice, CUcontext dstContext, CUdeviceptr srcDevice, CUcontext srcContext, size_t ByteCount); - CUresult CUDAAPI cuMemcpyPeerAsync(CUdeviceptr dstDevice, CUcontext dstContext, CUdeviceptr srcDevice, CUcontext srcContext, size_t ByteCount, CUstream hStream); - CUresult CUDAAPI cuMemcpy3DPeer(const CUDA_MEMCPY3D_PEER *pCopy); - CUresult CUDAAPI cuMemcpy3DPeerAsync(const CUDA_MEMCPY3D_PEER *pCopy, CUstream hStream); - - CUresult CUDAAPI cuMemsetD8Async(CUdeviceptr dstDevice, unsigned char uc, size_t N, CUstream hStream); - CUresult CUDAAPI cuMemsetD16Async(CUdeviceptr dstDevice, unsigned short us, size_t N, CUstream hStream); - CUresult CUDAAPI cuMemsetD32Async(CUdeviceptr dstDevice, unsigned int ui, size_t N, CUstream hStream); - CUresult CUDAAPI cuMemsetD2D8Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned char uc, size_t Width, size_t Height, CUstream hStream); - CUresult CUDAAPI cuMemsetD2D16Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned short us, size_t Width, size_t Height, CUstream hStream); - CUresult CUDAAPI cuMemsetD2D32Async(CUdeviceptr dstDevice, size_t dstPitch, unsigned int ui, size_t Width, size_t Height, CUstream hStream); - - CUresult CUDAAPI cuStreamGetPriority(CUstream hStream, int *priority); - CUresult CUDAAPI cuStreamGetFlags(CUstream hStream, unsigned int *flags); - CUresult CUDAAPI cuStreamGetCtx(CUstream hStream, CUcontext *pctx); - CUresult CUDAAPI cuStreamWaitEvent(CUstream hStream, CUevent hEvent, unsigned int Flags); - CUresult CUDAAPI cuStreamAddCallback(CUstream hStream, CUstreamCallback callback, void *userData, unsigned int flags); - CUresult CUDAAPI cuStreamAttachMemAsync(CUstream hStream, CUdeviceptr dptr, size_t length, unsigned int flags); - CUresult CUDAAPI cuStreamQuery(CUstream hStream); - CUresult CUDAAPI cuStreamSynchronize(CUstream hStream); - CUresult CUDAAPI cuEventRecord(CUevent hEvent, CUstream hStream); - CUresult CUDAAPI cuEventRecordWithFlags(CUevent hEvent, CUstream hStream, unsigned int flags); - CUresult CUDAAPI cuLaunchKernel(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void **kernelParams, void **extra); - CUresult CUDAAPI cuLaunchHostFunc(CUstream hStream, CUhostFn fn, void *userData); - CUresult CUDAAPI cuGraphicsMapResources(unsigned int count, CUgraphicsResource *resources, CUstream hStream); - CUresult CUDAAPI cuGraphicsUnmapResources(unsigned int count, CUgraphicsResource *resources, CUstream hStream); - CUresult CUDAAPI cuStreamWriteValue32(CUstream stream, 
CUdeviceptr addr, cuuint32_t value, unsigned int flags); - CUresult CUDAAPI cuStreamWaitValue32(CUstream stream, CUdeviceptr addr, cuuint32_t value, unsigned int flags); - CUresult CUDAAPI cuStreamWriteValue64(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags); - CUresult CUDAAPI cuStreamWaitValue64(CUstream stream, CUdeviceptr addr, cuuint64_t value, unsigned int flags); - CUresult CUDAAPI cuStreamBatchMemOp(CUstream stream, unsigned int count, CUstreamBatchMemOpParams *paramArray, unsigned int flags); - CUresult CUDAAPI cuMemPrefetchAsync(CUdeviceptr devPtr, size_t count, CUdevice dstDevice, CUstream hStream); - CUresult CUDAAPI cuLaunchCooperativeKernel(CUfunction f, unsigned int gridDimX, unsigned int gridDimY, unsigned int gridDimZ, unsigned int blockDimX, unsigned int blockDimY, unsigned int blockDimZ, unsigned int sharedMemBytes, CUstream hStream, void **kernelParams); - CUresult CUDAAPI cuSignalExternalSemaphoresAsync(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS *paramsArray, unsigned int numExtSems, CUstream stream); - CUresult CUDAAPI cuWaitExternalSemaphoresAsync(const CUexternalSemaphore *extSemArray, const CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS *paramsArray, unsigned int numExtSems, CUstream stream); - CUresult CUDAAPI cuStreamBeginCapture(CUstream hStream); - CUresult CUDAAPI cuStreamBeginCapture_ptsz(CUstream hStream); - CUresult CUDAAPI cuStreamBeginCapture_v2(CUstream hStream, CUstreamCaptureMode mode); - CUresult CUDAAPI cuStreamEndCapture(CUstream hStream, CUgraph *phGraph); - CUresult CUDAAPI cuStreamIsCapturing(CUstream hStream, CUstreamCaptureStatus *captureStatus); - CUresult CUDAAPI cuStreamGetCaptureInfo(CUstream hStream, CUstreamCaptureStatus *captureStatus_out, cuuint64_t *id_out); - CUresult CUDAAPI cuStreamGetCaptureInfo_v2(CUstream hStream, CUstreamCaptureStatus *captureStatus_out, cuuint64_t *id_out, CUgraph *graph_out, const CUgraphNode **dependencies_out, size_t *numDependencies_out); - CUresult CUDAAPI cuGraphUpload(CUgraphExec hGraph, CUstream hStream); - CUresult CUDAAPI cuGraphLaunch(CUgraphExec hGraph, CUstream hStream); - CUresult CUDAAPI cuStreamCopyAttributes(CUstream dstStream, CUstream srcStream); - CUresult CUDAAPI cuStreamGetAttribute(CUstream hStream, CUstreamAttrID attr, CUstreamAttrValue *value); - CUresult CUDAAPI cuStreamSetAttribute(CUstream hStream, CUstreamAttrID attr, const CUstreamAttrValue *param); - - CUresult CUDAAPI cuIpcOpenMemHandle(CUdeviceptr *pdptr, CUipcMemHandle handle, unsigned int Flags); - CUresult CUDAAPI cuGraphInstantiate(CUgraphExec *phGraphExec, CUgraph hGraph, CUgraphNode *phErrorNode, char *logBuffer, size_t bufferSize); - CUresult CUDAAPI cuMemMapArrayAsync(CUarrayMapInfo *mapInfoList, unsigned int count, CUstream hStream); - - CUresult CUDAAPI cuMemFreeAsync(CUdeviceptr dptr, CUstream hStream); - CUresult CUDAAPI cuMemAllocAsync(CUdeviceptr *dptr, size_t bytesize, CUstream hStream); - CUresult CUDAAPI cuMemAllocFromPoolAsync(CUdeviceptr *dptr, size_t bytesize, CUmemoryPool pool, CUstream hStream); - - CUresult CUDAAPI cuStreamUpdateCaptureDependencies(CUstream hStream, CUgraphNode *dependencies, size_t numDependencies, unsigned int flags); -#elif defined(__CUDA_API_PER_THREAD_DEFAULT_STREAM) -static inline CUresult cuGetProcAddress_ptsz(const char *symbol, void **funcPtr, int driverVersion, cuuint64_t flags) { - const int procAddressMask = (CU_GET_PROC_ADDRESS_LEGACY_STREAM| - CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM); - if ((flags & procAddressMask) 
== 0) { - flags |= CU_GET_PROC_ADDRESS_PER_THREAD_DEFAULT_STREAM; - } - return cuGetProcAddress(symbol, funcPtr, driverVersion, flags); -} -#define cuGetProcAddress cuGetProcAddress_ptsz -#endif - -#ifdef __cplusplus -} -#endif - -#if defined(__GNUC__) - #if defined(__CUDA_API_PUSH_VISIBILITY_DEFAULT) - #pragma GCC visibility pop - #endif -#endif - -#undef __CUDA_DEPRECATED - -#endif /* __cuda_cuda_h__ */ diff --git a/include/triton/external/CUDA/nvml.h b/include/triton/external/CUDA/nvml.h deleted file mode 100755 index c3962750b0c8..000000000000 --- a/include/triton/external/CUDA/nvml.h +++ /dev/null @@ -1,6281 +0,0 @@ -/* - * Copyright 1993-2018 NVIDIA Corporation. All rights reserved. - * - * NOTICE TO USER: - * - * This source code is subject to NVIDIA ownership rights under U.S. and - * international Copyright laws. Users and possessors of this source code - * are hereby granted a nonexclusive, royalty-free license to use this code - * in individual and commercial software. - * - * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE - * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR - * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH - * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. - * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, - * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS - * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE - * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE - * OR PERFORMANCE OF THIS SOURCE CODE. - * - * U.S. Government End Users. This source code is a "commercial item" as - * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of - * "commercial computer software" and "commercial computer software - * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) - * and is provided to the U.S. Government only as a commercial end item. - * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through - * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the - * source code with only those rights set forth herein. - * - * Any use of this source code in individual and commercial software must - * include, in the user documentation and internal comments to the code, - * the above Disclaimer and U.S. Government End Users Notice. - */ - -/* -NVML API Reference - -The NVIDIA Management Library (NVML) is a C-based programmatic interface for monitoring and -managing various states within NVIDIA Tesla &tm; GPUs. It is intended to be a platform for building -3rd party applications, and is also the underlying library for the NVIDIA-supported nvidia-smi -tool. NVML is thread-safe so it is safe to make simultaneous NVML calls from multiple threads. 
- -API Documentation - -Supported platforms: -- Windows: Windows Server 2008 R2 64bit, Windows Server 2012 R2 64bit, Windows 7 64bit, Windows 8 64bit, Windows 10 64bit -- Linux: 32-bit and 64-bit -- Hypervisors: Windows Server 2008R2/2012 Hyper-V 64bit, Citrix XenServer 6.2 SP1+, VMware ESX 5.1/5.5 - -Supported products: -- Full Support - - All Tesla products, starting with the Fermi architecture - - All Quadro products, starting with the Fermi architecture - - All GRID products, starting with the Kepler architecture - - Selected GeForce Titan products -- Limited Support - - All Geforce products, starting with the Fermi architecture - -The NVML library can be found at \%ProgramW6432\%\\"NVIDIA Corporation"\\NVSMI\\ on Windows. It is -not be added to the system path by default. To dynamically link to NVML, add this path to the PATH -environmental variable. To dynamically load NVML, call LoadLibrary with this path. - -On Linux the NVML library will be found on the standard library path. For 64 bit Linux, both the 32 bit -and 64 bit NVML libraries will be installed. - -Online documentation for this library is available at http://docs.nvidia.com/deploy/nvml-api/index.html -*/ - -#ifndef __nvml_nvml_h__ -#define __nvml_nvml_h__ - -#ifdef __cplusplus -extern "C" { -#endif - -/* - * On Windows, set up methods for DLL export - * define NVML_STATIC_IMPORT when using nvml_loader library - */ -#if defined _WINDOWS - #if !defined NVML_STATIC_IMPORT - #if defined NVML_LIB_EXPORT - #define DECLDIR __declspec(dllexport) - #else - #define DECLDIR __declspec(dllimport) - #endif - #else - #define DECLDIR - #endif -#else - #define DECLDIR -#endif - -/** - * NVML API versioning support - */ -#define NVML_API_VERSION 10 -#define NVML_API_VERSION_STR "10" -#define nvmlInit nvmlInit_v2 -#define nvmlDeviceGetPciInfo nvmlDeviceGetPciInfo_v3 -#define nvmlDeviceGetCount nvmlDeviceGetCount_v2 -#define nvmlDeviceGetHandleByIndex nvmlDeviceGetHandleByIndex_v2 -#define nvmlDeviceGetHandleByPciBusId nvmlDeviceGetHandleByPciBusId_v2 -#define nvmlDeviceGetNvLinkRemotePciInfo nvmlDeviceGetNvLinkRemotePciInfo_v2 -#define nvmlDeviceRemoveGpu nvmlDeviceRemoveGpu_v2 - -/***************************************************************************************************/ -/** @defgroup nvmlDeviceStructs Device Structs - * @{ - */ -/***************************************************************************************************/ - -/** - * Special constant that some fields take when they are not available. - * Used when only part of the struct is not available. - * - * Each structure explicitly states when to check for this value. - */ -#define NVML_VALUE_NOT_AVAILABLE (-1) - -typedef struct nvmlDevice_st* nvmlDevice_t; - -/** - * Buffer size guaranteed to be large enough for pci bus id - */ -#define NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE 32 - -/** - * Buffer size guaranteed to be large enough for pci bus id for ::busIdLegacy - */ -#define NVML_DEVICE_PCI_BUS_ID_BUFFER_V2_SIZE 16 - -/** - * PCI information about a GPU device. 
- */ -typedef struct nvmlPciInfo_st -{ - char busIdLegacy[NVML_DEVICE_PCI_BUS_ID_BUFFER_V2_SIZE]; //!< The legacy tuple domain:bus:device.function PCI identifier (& NULL terminator) - unsigned int domain; //!< The PCI domain on which the device's bus resides, 0 to 0xffffffff - unsigned int bus; //!< The bus on which the device resides, 0 to 0xff - unsigned int device; //!< The device's id on the bus, 0 to 31 - unsigned int pciDeviceId; //!< The combined 16-bit device id and 16-bit vendor id - - // Added in NVML 2.285 API - unsigned int pciSubSystemId; //!< The 32-bit Sub System Device ID - - char busId[NVML_DEVICE_PCI_BUS_ID_BUFFER_SIZE]; //!< The tuple domain:bus:device.function PCI identifier (& NULL terminator) -} nvmlPciInfo_t; - -/** - * PCI format string for ::busIdLegacy - */ -#define NVML_DEVICE_PCI_BUS_ID_LEGACY_FMT "%04X:%02X:%02X.0" - -/** - * PCI format string for ::busId - */ -#define NVML_DEVICE_PCI_BUS_ID_FMT "%08X:%02X:%02X.0" - -/** - * Utility macro for filling the pci bus id format from a nvmlPciInfo_t - */ -#define NVML_DEVICE_PCI_BUS_ID_FMT_ARGS(pciInfo) (pciInfo)->domain, \ - (pciInfo)->bus, \ - (pciInfo)->device - -/** - * Detailed ECC error counts for a device. - * - * @deprecated Different GPU families can have different memory error counters - * See \ref nvmlDeviceGetMemoryErrorCounter - */ -typedef struct nvmlEccErrorCounts_st -{ - unsigned long long l1Cache; //!< L1 cache errors - unsigned long long l2Cache; //!< L2 cache errors - unsigned long long deviceMemory; //!< Device memory errors - unsigned long long registerFile; //!< Register file errors -} nvmlEccErrorCounts_t; - -/** - * Utilization information for a device. - * Each sample period may be between 1 second and 1/6 second, depending on the product being queried. - */ -typedef struct nvmlUtilization_st -{ - unsigned int gpu; //!< Percent of time over the past sample period during which one or more kernels was executing on the GPU - unsigned int memory; //!< Percent of time over the past sample period during which global (device) memory was being read or written -} nvmlUtilization_t; - -/** - * Memory allocation information for a device. - */ -typedef struct nvmlMemory_st -{ - unsigned long long total; //!< Total installed FB memory (in bytes) - unsigned long long free; //!< Unallocated FB memory (in bytes) - unsigned long long used; //!< Allocated FB memory (in bytes). Note that the driver/GPU always sets aside a small amount of memory for bookkeeping -} nvmlMemory_t; - -/** - * BAR1 Memory allocation Information for a device - */ -typedef struct nvmlBAR1Memory_st -{ - unsigned long long bar1Total; //!< Total BAR1 Memory (in bytes) - unsigned long long bar1Free; //!< Unallocated BAR1 Memory (in bytes) - unsigned long long bar1Used; //!< Allocated Used Memory (in bytes) -}nvmlBAR1Memory_t; - -/** - * Information about running compute processes on the GPU - */ -typedef struct nvmlProcessInfo_st -{ - unsigned int pid; //!< Process ID - unsigned long long usedGpuMemory; //!< Amount of used GPU memory in bytes. - //! Under WDDM, \ref NVML_VALUE_NOT_AVAILABLE is always reported - //! 
because Windows KMD manages all the memory and not the NVIDIA driver -} nvmlProcessInfo_t; - -/** - * Enum to represent type of bridge chip - */ -typedef enum nvmlBridgeChipType_enum -{ - NVML_BRIDGE_CHIP_PLX = 0, - NVML_BRIDGE_CHIP_BRO4 = 1 -}nvmlBridgeChipType_t; - -/** - * Maximum number of NvLink links supported - */ -#define NVML_NVLINK_MAX_LINKS 6 - -/** - * Enum to represent the NvLink utilization counter packet units - */ -typedef enum nvmlNvLinkUtilizationCountUnits_enum -{ - NVML_NVLINK_COUNTER_UNIT_CYCLES = 0, // count by cycles - NVML_NVLINK_COUNTER_UNIT_PACKETS = 1, // count by packets - NVML_NVLINK_COUNTER_UNIT_BYTES = 2, // count by bytes - - // this must be last - NVML_NVLINK_COUNTER_UNIT_COUNT -} nvmlNvLinkUtilizationCountUnits_t; - -/** - * Enum to represent the NvLink utilization counter packet types to count - * ** this is ONLY applicable with the units as packets or bytes - * ** as specified in \a nvmlNvLinkUtilizationCountUnits_t - * ** all packet filter descriptions are target GPU centric - * ** these can be "OR'd" together - */ -typedef enum nvmlNvLinkUtilizationCountPktTypes_enum -{ - NVML_NVLINK_COUNTER_PKTFILTER_NOP = 0x1, // no operation packets - NVML_NVLINK_COUNTER_PKTFILTER_READ = 0x2, // read packets - NVML_NVLINK_COUNTER_PKTFILTER_WRITE = 0x4, // write packets - NVML_NVLINK_COUNTER_PKTFILTER_RATOM = 0x8, // reduction atomic requests - NVML_NVLINK_COUNTER_PKTFILTER_NRATOM = 0x10, // non-reduction atomic requests - NVML_NVLINK_COUNTER_PKTFILTER_FLUSH = 0x20, // flush requests - NVML_NVLINK_COUNTER_PKTFILTER_RESPDATA = 0x40, // responses with data - NVML_NVLINK_COUNTER_PKTFILTER_RESPNODATA = 0x80, // responses without data - NVML_NVLINK_COUNTER_PKTFILTER_ALL = 0xFF // all packets -} nvmlNvLinkUtilizationCountPktTypes_t; - -/** - * Struct to define the NVLINK counter controls - */ -typedef struct nvmlNvLinkUtilizationControl_st -{ - nvmlNvLinkUtilizationCountUnits_t units; - nvmlNvLinkUtilizationCountPktTypes_t pktfilter; -} nvmlNvLinkUtilizationControl_t; - -/** - * Enum to represent NvLink queryable capabilities - */ -typedef enum nvmlNvLinkCapability_enum -{ - NVML_NVLINK_CAP_P2P_SUPPORTED = 0, // P2P over NVLink is supported - NVML_NVLINK_CAP_SYSMEM_ACCESS = 1, // Access to system memory is supported - NVML_NVLINK_CAP_P2P_ATOMICS = 2, // P2P atomics are supported - NVML_NVLINK_CAP_SYSMEM_ATOMICS= 3, // System memory atomics are supported - NVML_NVLINK_CAP_SLI_BRIDGE = 4, // SLI is supported over this link - NVML_NVLINK_CAP_VALID = 5, // Link is supported on this device - // should be last - NVML_NVLINK_CAP_COUNT -} nvmlNvLinkCapability_t; - -/** - * Enum to represent NvLink queryable error counters - */ -typedef enum nvmlNvLinkErrorCounter_enum -{ - NVML_NVLINK_ERROR_DL_REPLAY = 0, // Data link transmit replay error counter - NVML_NVLINK_ERROR_DL_RECOVERY = 1, // Data link transmit recovery error counter - NVML_NVLINK_ERROR_DL_CRC_FLIT = 2, // Data link receive flow control digit CRC error counter - NVML_NVLINK_ERROR_DL_CRC_DATA = 3, // Data link receive data CRC error counter - - // this must be last - NVML_NVLINK_ERROR_COUNT -} nvmlNvLinkErrorCounter_t; - -/** - * Represents level relationships within a system between two GPUs - * The enums are spaced to allow for future relationships - */ -typedef enum nvmlGpuLevel_enum -{ - NVML_TOPOLOGY_INTERNAL = 0, // e.g. 
Tesla K80 - NVML_TOPOLOGY_SINGLE = 10, // all devices that only need traverse a single PCIe switch - NVML_TOPOLOGY_MULTIPLE = 20, // all devices that need not traverse a host bridge - NVML_TOPOLOGY_HOSTBRIDGE = 30, // all devices that are connected to the same host bridge - NVML_TOPOLOGY_NODE = 40, // all devices that are connected to the same NUMA node but possibly multiple host bridges - NVML_TOPOLOGY_SYSTEM = 50, // all devices in the system - - // there is purposefully no COUNT here because of the need for spacing above -} nvmlGpuTopologyLevel_t; - -/* Compatibility for CPU->NODE renaming */ -#define NVML_TOPOLOGY_CPU NVML_TOPOLOGY_NODE - -/* P2P Capability Index Status*/ -typedef enum nvmlGpuP2PStatus_enum -{ - NVML_P2P_STATUS_OK = 0, - NVML_P2P_STATUS_CHIPSET_NOT_SUPPORTED, - NVML_P2P_STATUS_GPU_NOT_SUPPORTED, - NVML_P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED, - NVML_P2P_STATUS_DISABLED_BY_REGKEY, - NVML_P2P_STATUS_NOT_SUPPORTED, - NVML_P2P_STATUS_UNKNOWN - -} nvmlGpuP2PStatus_t; - -/* P2P Capability Index*/ -typedef enum nvmlGpuP2PCapsIndex_enum -{ - NVML_P2P_CAPS_INDEX_READ = 0, - NVML_P2P_CAPS_INDEX_WRITE, - NVML_P2P_CAPS_INDEX_NVLINK, - NVML_P2P_CAPS_INDEX_ATOMICS, - NVML_P2P_CAPS_INDEX_PROP, - NVML_P2P_CAPS_INDEX_UNKNOWN -}nvmlGpuP2PCapsIndex_t; - -/** - * Maximum limit on Physical Bridges per Board - */ -#define NVML_MAX_PHYSICAL_BRIDGE (128) - -/** - * Information about the Bridge Chip Firmware - */ -typedef struct nvmlBridgeChipInfo_st -{ - nvmlBridgeChipType_t type; //!< Type of Bridge Chip - unsigned int fwVersion; //!< Firmware Version. 0=Version is unavailable -}nvmlBridgeChipInfo_t; - -/** - * This structure stores the complete Hierarchy of the Bridge Chip within the board. The immediate - * bridge is stored at index 0 of bridgeInfoList, parent to immediate bridge is at index 1 and so forth. 
- */ -typedef struct nvmlBridgeChipHierarchy_st -{ - unsigned char bridgeCount; //!< Number of Bridge Chips on the Board - nvmlBridgeChipInfo_t bridgeChipInfo[NVML_MAX_PHYSICAL_BRIDGE]; //!< Hierarchy of Bridge Chips on the board -}nvmlBridgeChipHierarchy_t; - -/** - * Represents Type of Sampling Event - */ -typedef enum nvmlSamplingType_enum -{ - NVML_TOTAL_POWER_SAMPLES = 0, //!< To represent total power drawn by GPU - NVML_GPU_UTILIZATION_SAMPLES = 1, //!< To represent percent of time during which one or more kernels was executing on the GPU - NVML_MEMORY_UTILIZATION_SAMPLES = 2, //!< To represent percent of time during which global (device) memory was being read or written - NVML_ENC_UTILIZATION_SAMPLES = 3, //!< To represent percent of time during which NVENC remains busy - NVML_DEC_UTILIZATION_SAMPLES = 4, //!< To represent percent of time during which NVDEC remains busy - NVML_PROCESSOR_CLK_SAMPLES = 5, //!< To represent processor clock samples - NVML_MEMORY_CLK_SAMPLES = 6, //!< To represent memory clock samples - - // Keep this last - NVML_SAMPLINGTYPE_COUNT -}nvmlSamplingType_t; - -/** - * Represents the queryable PCIe utilization counters - */ -typedef enum nvmlPcieUtilCounter_enum -{ - NVML_PCIE_UTIL_TX_BYTES = 0, // 1KB granularity - NVML_PCIE_UTIL_RX_BYTES = 1, // 1KB granularity - - // Keep this last - NVML_PCIE_UTIL_COUNT -} nvmlPcieUtilCounter_t; - -/** - * Represents the type for sample value returned - */ -typedef enum nvmlValueType_enum -{ - NVML_VALUE_TYPE_DOUBLE = 0, - NVML_VALUE_TYPE_UNSIGNED_INT = 1, - NVML_VALUE_TYPE_UNSIGNED_LONG = 2, - NVML_VALUE_TYPE_UNSIGNED_LONG_LONG = 3, - NVML_VALUE_TYPE_SIGNED_LONG_LONG = 4, - - // Keep this last - NVML_VALUE_TYPE_COUNT -}nvmlValueType_t; - - -/** - * Union to represent different types of Value - */ -typedef union nvmlValue_st -{ - double dVal; //!< If the value is double - unsigned int uiVal; //!< If the value is unsigned int - unsigned long ulVal; //!< If the value is unsigned long - unsigned long long ullVal; //!< If the value is unsigned long long - signed long long sllVal; //!< If the value is signed long long -}nvmlValue_t; - -/** - * Information for Sample - */ -typedef struct nvmlSample_st -{ - unsigned long long timeStamp; //!< CPU Timestamp in microseconds - nvmlValue_t sampleValue; //!< Sample Value -}nvmlSample_t; - -/** - * Represents type of perf policy for which violation times can be queried - */ -typedef enum nvmlPerfPolicyType_enum -{ - NVML_PERF_POLICY_POWER = 0, //!< How long did power violations cause the GPU to be below application clocks - NVML_PERF_POLICY_THERMAL = 1, //!< How long did thermal violations cause the GPU to be below application clocks - NVML_PERF_POLICY_SYNC_BOOST = 2, //!< How long did sync boost cause the GPU to be below application clocks - NVML_PERF_POLICY_BOARD_LIMIT = 3, //!< How long did the board limit cause the GPU to be below application clocks - NVML_PERF_POLICY_LOW_UTILIZATION = 4, //!< How long did low utilization cause the GPU to be below application clocks - NVML_PERF_POLICY_RELIABILITY = 5, //!< How long did the board reliability limit cause the GPU to be below application clocks - - NVML_PERF_POLICY_TOTAL_APP_CLOCKS = 10, //!< Total time the GPU was held below application clocks by any limiter (0 - 5 above) - NVML_PERF_POLICY_TOTAL_BASE_CLOCKS = 11, //!< Total time the GPU was held below base clocks - - // Keep this last - NVML_PERF_POLICY_COUNT -}nvmlPerfPolicyType_t; - -/** - * Struct to hold perf policy violation status data - */ -typedef struct nvmlViolationTime_st 
-{ - unsigned long long referenceTime; //!< referenceTime represents CPU timestamp in microseconds - unsigned long long violationTime; //!< violationTime in Nanoseconds -}nvmlViolationTime_t; - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlDeviceEnumvs Device Enums - * @{ - */ -/***************************************************************************************************/ - -/** - * Generic enable/disable enum. - */ -typedef enum nvmlEnableState_enum -{ - NVML_FEATURE_DISABLED = 0, //!< Feature disabled - NVML_FEATURE_ENABLED = 1 //!< Feature enabled -} nvmlEnableState_t; - -//! Generic flag used to specify the default behavior of some functions. See description of particular functions for details. -#define nvmlFlagDefault 0x00 -//! Generic flag used to force some behavior. See description of particular functions for details. -#define nvmlFlagForce 0x01 - -/** - * * The Brand of the GPU - * */ -typedef enum nvmlBrandType_enum -{ - NVML_BRAND_UNKNOWN = 0, - NVML_BRAND_QUADRO = 1, - NVML_BRAND_TESLA = 2, - NVML_BRAND_NVS = 3, - NVML_BRAND_GRID = 4, - NVML_BRAND_GEFORCE = 5, - NVML_BRAND_TITAN = 6, - - // Keep this last - NVML_BRAND_COUNT -} nvmlBrandType_t; - -/** - * Temperature thresholds. - */ -typedef enum nvmlTemperatureThresholds_enum -{ - NVML_TEMPERATURE_THRESHOLD_SHUTDOWN = 0, // Temperature at which the GPU will shut down - // for HW protection - NVML_TEMPERATURE_THRESHOLD_SLOWDOWN = 1, // Temperature at which the GPU will begin HW slowdown - NVML_TEMPERATURE_THRESHOLD_MEM_MAX = 2, // Memory Temperature at which the GPU will begin SW slowdown - NVML_TEMPERATURE_THRESHOLD_GPU_MAX = 3, // GPU Temperature at which the GPU can be throttled below base clock - // Keep this last - NVML_TEMPERATURE_THRESHOLD_COUNT -} nvmlTemperatureThresholds_t; - -/** - * Temperature sensors. - */ -typedef enum nvmlTemperatureSensors_enum -{ - NVML_TEMPERATURE_GPU = 0, //!< Temperature sensor for the GPU die - - // Keep this last - NVML_TEMPERATURE_COUNT -} nvmlTemperatureSensors_t; - -/** - * Compute mode. - * - * NVML_COMPUTEMODE_EXCLUSIVE_PROCESS was added in CUDA 4.0. - * Earlier CUDA versions supported a single exclusive mode, - * which is equivalent to NVML_COMPUTEMODE_EXCLUSIVE_THREAD in CUDA 4.0 and beyond. - */ -typedef enum nvmlComputeMode_enum -{ - NVML_COMPUTEMODE_DEFAULT = 0, //!< Default compute mode -- multiple contexts per device - NVML_COMPUTEMODE_EXCLUSIVE_THREAD = 1, //!< Support Removed - NVML_COMPUTEMODE_PROHIBITED = 2, //!< Compute-prohibited mode -- no contexts per device - NVML_COMPUTEMODE_EXCLUSIVE_PROCESS = 3, //!< Compute-exclusive-process mode -- only one context per device, usable from multiple threads at a time - - // Keep this last - NVML_COMPUTEMODE_COUNT -} nvmlComputeMode_t; - -/** - * ECC bit types. 
- * - * @deprecated See \ref nvmlMemoryErrorType_t for a more flexible type - */ -#define nvmlEccBitType_t nvmlMemoryErrorType_t - -/** - * Single bit ECC errors - * - * @deprecated Mapped to \ref NVML_MEMORY_ERROR_TYPE_CORRECTED - */ -#define NVML_SINGLE_BIT_ECC NVML_MEMORY_ERROR_TYPE_CORRECTED - -/** - * Double bit ECC errors - * - * @deprecated Mapped to \ref NVML_MEMORY_ERROR_TYPE_UNCORRECTED - */ -#define NVML_DOUBLE_BIT_ECC NVML_MEMORY_ERROR_TYPE_UNCORRECTED - -/** - * Memory error types - */ -typedef enum nvmlMemoryErrorType_enum -{ - /** - * A memory error that was corrected - * - * For ECC errors, these are single bit errors - * For Texture memory, these are errors fixed by resend - */ - NVML_MEMORY_ERROR_TYPE_CORRECTED = 0, - /** - * A memory error that was not corrected - * - * For ECC errors, these are double bit errors - * For Texture memory, these are errors where the resend fails - */ - NVML_MEMORY_ERROR_TYPE_UNCORRECTED = 1, - - - // Keep this last - NVML_MEMORY_ERROR_TYPE_COUNT //!< Count of memory error types - -} nvmlMemoryErrorType_t; - -/** - * ECC counter types. - * - * Note: Volatile counts are reset each time the driver loads. On Windows this is once per boot. On Linux this can be more frequent. - * On Linux the driver unloads when no active clients exist. If persistence mode is enabled or there is always a driver - * client active (e.g. X11), then Linux also sees per-boot behavior. If not, volatile counts are reset each time a compute app - * is run. - */ -typedef enum nvmlEccCounterType_enum -{ - NVML_VOLATILE_ECC = 0, //!< Volatile counts are reset each time the driver loads. - NVML_AGGREGATE_ECC = 1, //!< Aggregate counts persist across reboots (i.e. for the lifetime of the device) - - // Keep this last - NVML_ECC_COUNTER_TYPE_COUNT //!< Count of memory counter types -} nvmlEccCounterType_t; - -/** - * Clock types. - * - * All speeds are in Mhz. - */ -typedef enum nvmlClockType_enum -{ - NVML_CLOCK_GRAPHICS = 0, //!< Graphics clock domain - NVML_CLOCK_SM = 1, //!< SM clock domain - NVML_CLOCK_MEM = 2, //!< Memory clock domain - NVML_CLOCK_VIDEO = 3, //!< Video encoder/decoder clock domain - - // Keep this last - NVML_CLOCK_COUNT //!< Count of clock types -} nvmlClockType_t; - -/** - * Clock Ids. These are used in combination with nvmlClockType_t - * to specify a single clock value. - */ -typedef enum nvmlClockId_enum -{ - NVML_CLOCK_ID_CURRENT = 0, //!< Current actual clock value - NVML_CLOCK_ID_APP_CLOCK_TARGET = 1, //!< Target application clock - NVML_CLOCK_ID_APP_CLOCK_DEFAULT = 2, //!< Default application clock target - NVML_CLOCK_ID_CUSTOMER_BOOST_MAX = 3, //!< OEM-defined maximum clock rate - - //Keep this last - NVML_CLOCK_ID_COUNT //!< Count of Clock Ids. -} nvmlClockId_t; - -/** - * Driver models. - * - * Windows only. - */ -typedef enum nvmlDriverModel_enum -{ - NVML_DRIVER_WDDM = 0, //!< WDDM driver model -- GPU treated as a display device - NVML_DRIVER_WDM = 1 //!< WDM (TCC) model (recommended) -- GPU treated as a generic device -} nvmlDriverModel_t; - -/** - * Allowed PStates. 
- */ -typedef enum nvmlPStates_enum -{ - NVML_PSTATE_0 = 0, //!< Performance state 0 -- Maximum Performance - NVML_PSTATE_1 = 1, //!< Performance state 1 - NVML_PSTATE_2 = 2, //!< Performance state 2 - NVML_PSTATE_3 = 3, //!< Performance state 3 - NVML_PSTATE_4 = 4, //!< Performance state 4 - NVML_PSTATE_5 = 5, //!< Performance state 5 - NVML_PSTATE_6 = 6, //!< Performance state 6 - NVML_PSTATE_7 = 7, //!< Performance state 7 - NVML_PSTATE_8 = 8, //!< Performance state 8 - NVML_PSTATE_9 = 9, //!< Performance state 9 - NVML_PSTATE_10 = 10, //!< Performance state 10 - NVML_PSTATE_11 = 11, //!< Performance state 11 - NVML_PSTATE_12 = 12, //!< Performance state 12 - NVML_PSTATE_13 = 13, //!< Performance state 13 - NVML_PSTATE_14 = 14, //!< Performance state 14 - NVML_PSTATE_15 = 15, //!< Performance state 15 -- Minimum Performance - NVML_PSTATE_UNKNOWN = 32 //!< Unknown performance state -} nvmlPstates_t; - -/** - * GPU Operation Mode - * - * GOM allows to reduce power usage and optimize GPU throughput by disabling GPU features. - * - * Each GOM is designed to meet specific user needs. - */ -typedef enum nvmlGom_enum -{ - NVML_GOM_ALL_ON = 0, //!< Everything is enabled and running at full speed - - NVML_GOM_COMPUTE = 1, //!< Designed for running only compute tasks. Graphics operations - //!< are not allowed - - NVML_GOM_LOW_DP = 2 //!< Designed for running graphics applications that don't require - //!< high bandwidth double precision -} nvmlGpuOperationMode_t; - -/** - * Available infoROM objects. - */ -typedef enum nvmlInforomObject_enum -{ - NVML_INFOROM_OEM = 0, //!< An object defined by OEM - NVML_INFOROM_ECC = 1, //!< The ECC object determining the level of ECC support - NVML_INFOROM_POWER = 2, //!< The power management object - - // Keep this last - NVML_INFOROM_COUNT //!< This counts the number of infoROM objects the driver knows about -} nvmlInforomObject_t; - -/** - * Return values for NVML API calls. 
- */ -typedef enum nvmlReturn_enum -{ - NVML_SUCCESS = 0, //!< The operation was successful - NVML_ERROR_UNINITIALIZED = 1, //!< NVML was not first initialized with nvmlInit() - NVML_ERROR_INVALID_ARGUMENT = 2, //!< A supplied argument is invalid - NVML_ERROR_NOT_SUPPORTED = 3, //!< The requested operation is not available on target device - NVML_ERROR_NO_PERMISSION = 4, //!< The current user does not have permission for operation - NVML_ERROR_ALREADY_INITIALIZED = 5, //!< Deprecated: Multiple initializations are now allowed through ref counting - NVML_ERROR_NOT_FOUND = 6, //!< A query to find an object was unsuccessful - NVML_ERROR_INSUFFICIENT_SIZE = 7, //!< An input argument is not large enough - NVML_ERROR_INSUFFICIENT_POWER = 8, //!< A device's external power cables are not properly attached - NVML_ERROR_DRIVER_NOT_LOADED = 9, //!< NVIDIA driver is not loaded - NVML_ERROR_TIMEOUT = 10, //!< User provided timeout passed - NVML_ERROR_IRQ_ISSUE = 11, //!< NVIDIA Kernel detected an interrupt issue with a GPU - NVML_ERROR_LIBRARY_NOT_FOUND = 12, //!< NVML Shared Library couldn't be found or loaded - NVML_ERROR_FUNCTION_NOT_FOUND = 13, //!< Local version of NVML doesn't implement this function - NVML_ERROR_CORRUPTED_INFOROM = 14, //!< infoROM is corrupted - NVML_ERROR_GPU_IS_LOST = 15, //!< The GPU has fallen off the bus or has otherwise become inaccessible - NVML_ERROR_RESET_REQUIRED = 16, //!< The GPU requires a reset before it can be used again - NVML_ERROR_OPERATING_SYSTEM = 17, //!< The GPU control device has been blocked by the operating system/cgroups - NVML_ERROR_LIB_RM_VERSION_MISMATCH = 18, //!< RM detects a driver/library version mismatch - NVML_ERROR_IN_USE = 19, //!< An operation cannot be performed because the GPU is currently in use - NVML_ERROR_MEMORY = 20, //!< Insufficient memory - NVML_ERROR_NO_DATA = 21, //!usedGpuMemory is not supported - - - unsigned long long time; //!< Amount of time in ms during which the compute context was active. The time is reported as 0 if - //!< the process is not terminated - - unsigned long long startTime; //!< CPU Timestamp in usec representing start time for the process - - unsigned int isRunning; //!< Flag to represent if the process is running (1 for running, 0 for terminated) - - unsigned int reserved[5]; //!< Reserved for future use -} nvmlAccountingStats_t; - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlVgpuConstants Vgpu Constants - * @{ - */ -/***************************************************************************************************/ - -/** - * Buffer size guaranteed to be large enough for \ref nvmlVgpuTypeGetLicense - */ -#define NVML_GRID_LICENSE_BUFFER_SIZE 128 - -#define NVML_VGPU_NAME_BUFFER_SIZE 64 - -#define NVML_GRID_LICENSE_FEATURE_MAX_COUNT 3 - -/*! - * Macros for pGPU's virtualization capabilities bitfield. - */ -#define NVML_VGPU_PGPU_VIRTUALIZATION_CAP_MIGRATION 0:0 -#define NVML_VGPU_PGPU_VIRTUALIZATION_CAP_MIGRATION_NO 0x0 -#define NVML_VGPU_PGPU_VIRTUALIZATION_CAP_MIGRATION_YES 0x1 - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlVgpuEnum Vgpu Enum - * @{ - */ -/***************************************************************************************************/ - -/*! 
- * Types of VM identifiers - */ -typedef enum nvmlVgpuVmIdType { - NVML_VGPU_VM_ID_DOMAIN_ID = 0, //!< VM ID represents DOMAIN ID - NVML_VGPU_VM_ID_UUID = 1, //!< VM ID represents UUID -} nvmlVgpuVmIdType_t; - -/** - * vGPU GUEST info state. - */ -typedef enum nvmlVgpuGuestInfoState_enum -{ - NVML_VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED = 0, //!< Guest-dependent fields uninitialized - NVML_VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED = 1, //!< Guest-dependent fields initialized -} nvmlVgpuGuestInfoState_t; - -/** - * GRID license feature code - */ -typedef enum { - NVML_GRID_LICENSE_FEATURE_CODE_VGPU = 1, //!< Virtual GPU - NVML_GRID_LICENSE_FEATURE_CODE_VWORKSTATION = 2 //!< Virtual Workstation -} nvmlGridLicenseFeatureCode_t; - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlVgpuStructs Vgpu Structs - * @{ - */ -/***************************************************************************************************/ - -typedef unsigned int nvmlVgpuTypeId_t; - -typedef unsigned int nvmlVgpuInstance_t; - -/** - * Structure to store Utilization Value and vgpuInstance - */ -typedef struct nvmlVgpuInstanceUtilizationSample_st -{ - nvmlVgpuInstance_t vgpuInstance; //!< vGPU Instance - unsigned long long timeStamp; //!< CPU Timestamp in microseconds - nvmlValue_t smUtil; //!< SM (3D/Compute) Util Value - nvmlValue_t memUtil; //!< Frame Buffer Memory Util Value - nvmlValue_t encUtil; //!< Encoder Util Value - nvmlValue_t decUtil; //!< Decoder Util Value -} nvmlVgpuInstanceUtilizationSample_t; - -/** - * Structure to store Utilization Value, vgpuInstance and subprocess information - */ -typedef struct nvmlVgpuProcessUtilizationSample_st -{ - nvmlVgpuInstance_t vgpuInstance; //!< vGPU Instance - unsigned int pid; //!< PID of process running within the vGPU VM - char processName[NVML_VGPU_NAME_BUFFER_SIZE]; //!< Name of process running within the vGPU VM - unsigned long long timeStamp; //!< CPU Timestamp in microseconds - unsigned int smUtil; //!< SM (3D/Compute) Util Value - unsigned int memUtil; //!< Frame Buffer Memory Util Value - unsigned int encUtil; //!< Encoder Util Value - unsigned int decUtil; //!< Decoder Util Value -} nvmlVgpuProcessUtilizationSample_t; - -/** - * Structure to store utilization value and process Id - */ -typedef struct nvmlProcessUtilizationSample_st -{ - unsigned int pid; //!< PID of process - unsigned long long timeStamp; //!< CPU Timestamp in microseconds - unsigned int smUtil; //!< SM (3D/Compute) Util Value - unsigned int memUtil; //!< Frame Buffer Memory Util Value - unsigned int encUtil; //!< Encoder Util Value - unsigned int decUtil; //!< Decoder Util Value -} nvmlProcessUtilizationSample_t; - -/** - * Structure containing GRID licensable feature information - */ -typedef struct nvmlGridLicensableFeature_st -{ - nvmlGridLicenseFeatureCode_t featureCode; //!< Licensed feature code - unsigned int featureState; //!< Non-zero if feature is currently licensed, otherwise zero - char licenseInfo[NVML_GRID_LICENSE_BUFFER_SIZE]; -} nvmlGridLicensableFeature_t; - -/** - * Structure to store GRID licensable features - */ -typedef struct nvmlGridLicensableFeatures_st -{ - int isGridLicenseSupported; //!< Non-zero if GRID Software Licensing is supported on the system, otherwise zero - unsigned int licensableFeaturesCount; //!< Entries returned in \a gridLicensableFeatures array - nvmlGridLicensableFeature_t gridLicensableFeatures[NVML_GRID_LICENSE_FEATURE_MAX_COUNT]; //!< Array of GRID 
licensable features. -} nvmlGridLicensableFeatures_t; - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlEncoderStructs Encoder Structs - * @{ - */ -/***************************************************************************************************/ - -/** - * Represents type of encoder for capacity can be queried - */ -typedef enum nvmlEncoderQueryType_enum -{ - NVML_ENCODER_QUERY_H264 = 0, //!< H264 encoder - NVML_ENCODER_QUERY_HEVC = 1, //!< HEVC encoder -}nvmlEncoderType_t; - -/** - * Structure to hold encoder session data - */ -typedef struct nvmlEncoderSessionInfo_st -{ - unsigned int sessionId; //!< Unique session ID - unsigned int pid; //!< Owning process ID - nvmlVgpuInstance_t vgpuInstance; //!< Owning vGPU instance ID (only valid on vGPU hosts, otherwise zero) - nvmlEncoderType_t codecType; //!< Video encoder type - unsigned int hResolution; //!< Current encode horizontal resolution - unsigned int vResolution; //!< Current encode vertical resolution - unsigned int averageFps; //!< Moving average encode frames per second - unsigned int averageLatency; //!< Moving average encode latency in microseconds -}nvmlEncoderSessionInfo_t; - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlFBCStructs Frame Buffer Capture Structures -* @{ -*/ -/***************************************************************************************************/ - -/** - * Represents frame buffer capture session type - */ -typedef enum nvmlFBCSessionType_enum -{ - NVML_FBC_SESSION_TYPE_UNKNOWN = 0, //!< Unknown - NVML_FBC_SESSION_TYPE_TOSYS, //!< ToSys - NVML_FBC_SESSION_TYPE_CUDA, //!< Cuda - NVML_FBC_SESSION_TYPE_VID, //!< Vid - NVML_FBC_SESSION_TYPE_HWENC, //!< HEnc -} nvmlFBCSessionType_t; - -/** - * Structure to hold frame buffer capture sessions stats - */ -typedef struct nvmlFBCStats_st -{ - unsigned int sessionsCount; //!< Total no of sessions - unsigned int averageFPS; //!< Moving average new frames captured per second - unsigned int averageLatency; //!< Moving average new frame capture latency in microseconds -} nvmlFBCStats_t; - -#define NVML_NVFBC_SESSION_FLAG_DIFFMAP_ENABLED 0x00000001 //!< Bit specifying differential map state. -#define NVML_NVFBC_SESSION_FLAG_CLASSIFICATIONMAP_ENABLED 0x00000002 //!< Bit specifying classification map state. -#define NVML_NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_NO_WAIT 0x00000004 //!< Bit specifying if capture was requested as non-blocking call. -#define NVML_NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_INFINITE 0x00000008 //!< Bit specifying if capture was requested as blocking call. -#define NVML_NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_TIMEOUT 0x00000010 //!< Bit specifying if capture was requested as blocking call with timeout period. - -/** - * Structure to hold FBC session data - */ -typedef struct nvmlFBCSessionInfo_st -{ - unsigned int sessionId; //!< Unique session ID - unsigned int pid; //!< Owning process ID - nvmlVgpuInstance_t vgpuInstance; //!< Owning vGPU instance ID (only valid on vGPU hosts, otherwise zero) - unsigned int displayOrdinal; //!< Display identifier - nvmlFBCSessionType_t sessionType; //!< Type of frame buffer capture session - unsigned int sessionFlags; //!< Session flags (one or more of NVML_NVFBC_SESSION_FLAG_XXX). 
- unsigned int hMaxResolution; //!< Max horizontal resolution supported by the capture session - unsigned int vMaxResolution; //!< Max vertical resolution supported by the capture session - unsigned int hResolution; //!< Horizontal resolution requested by caller in capture call - unsigned int vResolution; //!< Vertical resolution requested by caller in capture call - unsigned int averageFPS; //!< Moving average new frames captured per second - unsigned int averageLatency; //!< Moving average new frame capture latency in microseconds -} nvmlFBCSessionInfo_t; - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlDrainDefs definitions related to the drain state - * @{ - */ -/***************************************************************************************************/ - -/** - * Is the GPU device to be removed from the kernel by nvmlDeviceRemoveGpu() - */ -typedef enum nvmlDetachGpuState_enum -{ - NVML_DETACH_GPU_KEEP = 0, - NVML_DETACH_GPU_REMOVE, -} nvmlDetachGpuState_t; - -/** - * Parent bridge PCIe link state requested by nvmlDeviceRemoveGpu() - */ -typedef enum nvmlPcieLinkState_enum -{ - NVML_PCIE_LINK_KEEP = 0, - NVML_PCIE_LINK_SHUT_DOWN, -} nvmlPcieLinkState_t; - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlInitializationAndCleanup Initialization and Cleanup - * This chapter describes the methods that handle NVML initialization and cleanup. - * It is the user's responsibility to call \ref nvmlInit() before calling any other methods, and - * nvmlShutdown() once NVML is no longer being used. - * @{ - */ -/***************************************************************************************************/ - -#define NVML_INIT_FLAG_NO_GPUS 1 //!< Don't fail nvmlInit() when no GPUs are found -#define NVML_INIT_FLAG_NO_ATTACH 2 //!< Don't attach GPUs - -/** - * Initialize NVML, but don't initialize any GPUs yet. - * - * \note nvmlInit_v3 introduces a "flags" argument, that allows passing boolean values - * modifying the behaviour of nvmlInit(). - * \note In NVML 5.319 new nvmlInit_v2 has replaced nvmlInit"_v1" (default in NVML 4.304 and older) that - * did initialize all GPU devices in the system. - * - * This allows NVML to communicate with a GPU - * when other GPUs in the system are unstable or in a bad state. When using this API, GPUs are - * discovered and initialized in nvmlDeviceGetHandleBy* functions instead. - * - * \note To contrast nvmlInit_v2 with nvmlInit"_v1", NVML 4.304 nvmlInit"_v1" will fail when any detected GPU is in - * a bad or unstable state. - * - * For all products. - * - * This method, should be called once before invoking any other methods in the library. - * A reference count of the number of initializations is maintained. Shutdown only occurs - * when the reference count reaches zero. - * - * @return - * - \ref NVML_SUCCESS if NVML has been properly initialized - * - \ref NVML_ERROR_DRIVER_NOT_LOADED if NVIDIA driver is not running - * - \ref NVML_ERROR_NO_PERMISSION if NVML does not have permission to talk to the driver - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlInit(void); - -/** - * nvmlInitWithFlags is a variant of nvmlInit(), that allows passing a set of boolean values - * modifying the behaviour of nvmlInit(). - * Other than the "flags" parameter it is completely similar to \ref nvmlInit. - * - * For all products. 
- * - * @param flags behaviour modifier flags - * - * @return - * - \ref NVML_SUCCESS if NVML has been properly initialized - * - \ref NVML_ERROR_DRIVER_NOT_LOADED if NVIDIA driver is not running - * - \ref NVML_ERROR_NO_PERMISSION if NVML does not have permission to talk to the driver - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlInitWithFlags(unsigned int flags); - -/** - * Shut down NVML by releasing all GPU resources previously allocated with \ref nvmlInit(). - * - * For all products. - * - * This method should be called after NVML work is done, once for each call to \ref nvmlInit() - * A reference count of the number of initializations is maintained. Shutdown only occurs - * when the reference count reaches zero. For backwards compatibility, no error is reported if - * nvmlShutdown() is called more times than nvmlInit(). - * - * @return - * - \ref NVML_SUCCESS if NVML has been properly shut down - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlShutdown(void); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlErrorReporting Error reporting - * This chapter describes helper functions for error reporting routines. - * @{ - */ -/***************************************************************************************************/ - -/** - * Helper method for converting NVML error codes into readable strings. - * - * For all products. - * - * @param result NVML error code to convert - * - * @return String representation of the error. - * - */ -const DECLDIR char* nvmlErrorString(nvmlReturn_t result); -/** @} */ - - -/***************************************************************************************************/ -/** @defgroup nvmlConstants Constants - * @{ - */ -/***************************************************************************************************/ - -/** - * Buffer size guaranteed to be large enough for \ref nvmlDeviceGetInforomVersion and \ref nvmlDeviceGetInforomImageVersion - */ -#define NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE 16 - -/** - * Buffer size guaranteed to be large enough for \ref nvmlDeviceGetUUID - */ -#define NVML_DEVICE_UUID_BUFFER_SIZE 80 - -/** - * Buffer size guaranteed to be large enough for \ref nvmlDeviceGetBoardPartNumber - */ -#define NVML_DEVICE_PART_NUMBER_BUFFER_SIZE 80 - -/** - * Buffer size guaranteed to be large enough for \ref nvmlSystemGetDriverVersion - */ -#define NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE 80 - -/** - * Buffer size guaranteed to be large enough for \ref nvmlSystemGetNVMLVersion - */ -#define NVML_SYSTEM_NVML_VERSION_BUFFER_SIZE 80 - -/** - * Buffer size guaranteed to be large enough for \ref nvmlDeviceGetName - */ -#define NVML_DEVICE_NAME_BUFFER_SIZE 64 - -/** - * Buffer size guaranteed to be large enough for \ref nvmlDeviceGetSerial - */ -#define NVML_DEVICE_SERIAL_BUFFER_SIZE 30 - -/** - * Buffer size guaranteed to be large enough for \ref nvmlDeviceGetVbiosVersion - */ -#define NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE 32 - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlSystemQueries System Queries - * This chapter describes the queries that NVML can perform against the local system. These queries - * are not device-specific. 
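Taken together, nvmlInit(), nvmlShutdown() and nvmlErrorString() define the library lifecycle described above: initialize once, check every return code, and shut down when finished. A minimal sketch of that pattern (assuming the header is available as nvml.h and the program is linked against the NVML shared library; the structure of main() here is illustrative, not part of the API):

#include <nvml.h>
#include <stdio.h>

int main(void) {
    /* Initialize NVML; initialization is reference-counted, so pair every
     * nvmlInit() with a matching nvmlShutdown(). */
    nvmlReturn_t st = nvmlInit();
    if (st != NVML_SUCCESS) {
        fprintf(stderr, "nvmlInit failed: %s\n", nvmlErrorString(st));
        return 1;
    }

    /* ... system and device queries go here ... */

    st = nvmlShutdown();
    if (st != NVML_SUCCESS)
        fprintf(stderr, "nvmlShutdown failed: %s\n", nvmlErrorString(st));
    return 0;
}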
- * @{ - */ -/***************************************************************************************************/ - -/** - * Retrieves the version of the system's graphics driver. - * - * For all products. - * - * The version identifier is an alphanumeric string. It will not exceed 80 characters in length - * (including the NULL terminator). See \ref nvmlConstants::NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE. - * - * @param version Reference in which to return the version identifier - * @param length The maximum allowed length of the string returned in \a version - * - * @return - * - \ref NVML_SUCCESS if \a version has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small - */ -nvmlReturn_t DECLDIR nvmlSystemGetDriverVersion(char *version, unsigned int length); - -/** - * Retrieves the version of the NVML library. - * - * For all products. - * - * The version identifier is an alphanumeric string. It will not exceed 80 characters in length - * (including the NULL terminator). See \ref nvmlConstants::NVML_SYSTEM_NVML_VERSION_BUFFER_SIZE. - * - * @param version Reference in which to return the version identifier - * @param length The maximum allowed length of the string returned in \a version - * - * @return - * - \ref NVML_SUCCESS if \a version has been set - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small - */ -nvmlReturn_t DECLDIR nvmlSystemGetNVMLVersion(char *version, unsigned int length); - -/** - * Retrieves the version of the CUDA driver. - * - * For all products. - * - * The returned CUDA driver version is the same as the CUDA API - * cuDriverGetVersion() would return on the system. - * - * @param cudaDriverVersion Reference in which to return the version identifier - * - * @return - * - \ref NVML_SUCCESS if \a cudaDriverVersion has been set - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a cudaDriverVersion is NULL - */ -nvmlReturn_t DECLDIR nvmlSystemGetCudaDriverVersion(int *cudaDriverVersion); - -/** - * Gets name of the process with provided process id - * - * For all products. - * - * Returned process name is cropped to provided length. - * name string is encoded in ANSI. - * - * @param pid The identifier of the process - * @param name Reference in which to return the process name - * @param length The maximum allowed length of the string returned in \a name - * - * @return - * - \ref NVML_SUCCESS if \a name has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a name is NULL or \a length is 0. - * - \ref NVML_ERROR_NOT_FOUND if process doesn't exists - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlSystemGetProcessName(unsigned int pid, char *name, unsigned int length); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlUnitQueries Unit Queries - * This chapter describes that queries that NVML can perform against each unit. For S-class systems only. - * In each case the device is identified with an nvmlUnit_t handle. This handle is obtained by - * calling \ref nvmlUnitGetHandleByIndex(). 
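The system-level version queries above all follow the same fill-a-caller-buffer convention, sized by the NVML_SYSTEM_*_BUFFER_SIZE constants. A short sketch, assuming nvmlInit() has already succeeded as in the earlier example (the helper name is illustrative):

#include <nvml.h>
#include <stdio.h>

/* Print driver, NVML and CUDA driver versions. */
static void print_system_versions(void) {
    char driver[NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE];
    char nvml_ver[NVML_SYSTEM_NVML_VERSION_BUFFER_SIZE];
    int cuda = 0;

    if (nvmlSystemGetDriverVersion(driver, sizeof(driver)) == NVML_SUCCESS)
        printf("driver: %s\n", driver);
    if (nvmlSystemGetNVMLVersion(nvml_ver, sizeof(nvml_ver)) == NVML_SUCCESS)
        printf("NVML:   %s\n", nvml_ver);
    if (nvmlSystemGetCudaDriverVersion(&cuda) == NVML_SUCCESS)
        /* cuDriverGetVersion encoding: 1000 * major + 10 * minor */
        printf("CUDA:   %d.%d\n", cuda / 1000, (cuda % 1000) / 10);
}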
- * @{ - */ -/***************************************************************************************************/ - - /** - * Retrieves the number of units in the system. - * - * For S-class products. - * - * @param unitCount Reference in which to return the number of units - * - * @return - * - \ref NVML_SUCCESS if \a unitCount has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unitCount is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlUnitGetCount(unsigned int *unitCount); - -/** - * Acquire the handle for a particular unit, based on its index. - * - * For S-class products. - * - * Valid indices are derived from the \a unitCount returned by \ref nvmlUnitGetCount(). - * For example, if \a unitCount is 2 the valid indices are 0 and 1, corresponding to UNIT 0 and UNIT 1. - * - * The order in which NVML enumerates units has no guarantees of consistency between reboots. - * - * @param index The index of the target unit, >= 0 and < \a unitCount - * @param unit Reference in which to return the unit handle - * - * @return - * - \ref NVML_SUCCESS if \a unit has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a unit is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlUnitGetHandleByIndex(unsigned int index, nvmlUnit_t *unit); - -/** - * Retrieves the static information associated with a unit. - * - * For S-class products. - * - * See \ref nvmlUnitInfo_t for details on available unit info. - * - * @param unit The identifier of the target unit - * @param info Reference in which to return the unit information - * - * @return - * - \ref NVML_SUCCESS if \a info has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a info is NULL - */ -nvmlReturn_t DECLDIR nvmlUnitGetUnitInfo(nvmlUnit_t unit, nvmlUnitInfo_t *info); - -/** - * Retrieves the LED state associated with this unit. - * - * For S-class products. - * - * See \ref nvmlLedState_t for details on allowed states. - * - * @param unit The identifier of the target unit - * @param state Reference in which to return the current LED state - * - * @return - * - \ref NVML_SUCCESS if \a state has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a state is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlUnitSetLedState() - */ -nvmlReturn_t DECLDIR nvmlUnitGetLedState(nvmlUnit_t unit, nvmlLedState_t *state); - -/** - * Retrieves the PSU stats for the unit. - * - * For S-class products. - * - * See \ref nvmlPSUInfo_t for details on available PSU info. 
- * - * @param unit The identifier of the target unit - * @param psu Reference in which to return the PSU information - * - * @return - * - \ref NVML_SUCCESS if \a psu has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a psu is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlUnitGetPsuInfo(nvmlUnit_t unit, nvmlPSUInfo_t *psu); - -/** - * Retrieves the temperature readings for the unit, in degrees C. - * - * For S-class products. - * - * Depending on the product, readings may be available for intake (type=0), - * exhaust (type=1) and board (type=2). - * - * @param unit The identifier of the target unit - * @param type The type of reading to take - * @param temp Reference in which to return the intake temperature - * - * @return - * - \ref NVML_SUCCESS if \a temp has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit or \a type is invalid or \a temp is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlUnitGetTemperature(nvmlUnit_t unit, unsigned int type, unsigned int *temp); - -/** - * Retrieves the fan speed readings for the unit. - * - * For S-class products. - * - * See \ref nvmlUnitFanSpeeds_t for details on available fan speed info. - * - * @param unit The identifier of the target unit - * @param fanSpeeds Reference in which to return the fan speed information - * - * @return - * - \ref NVML_SUCCESS if \a fanSpeeds has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid or \a fanSpeeds is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlUnitGetFanSpeedInfo(nvmlUnit_t unit, nvmlUnitFanSpeeds_t *fanSpeeds); - -/** - * Retrieves the set of GPU devices that are attached to the specified unit. - * - * For S-class products. - * - * The \a deviceCount argument is expected to be set to the size of the input \a devices array. - * - * @param unit The identifier of the target unit - * @param deviceCount Reference in which to provide the \a devices array size, and - * to return the number of attached GPU devices - * @param devices Reference in which to return the references to the attached GPU devices - * - * @return - * - \ref NVML_SUCCESS if \a deviceCount and \a devices have been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a deviceCount indicates that the \a devices array is too small - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit is invalid, either of \a deviceCount or \a devices is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlUnitGetDevices(nvmlUnit_t unit, unsigned int *deviceCount, nvmlDevice_t *devices); - -/** - * Retrieves the IDs and firmware versions for any Host Interface Cards (HICs) in the system. - * - * For S-class products. - * - * The \a hwbcCount argument is expected to be set to the size of the input \a hwbcEntries array. 
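For S-class systems, the unit queries above combine in the obvious way: count the units, acquire a handle by index, then read per-unit data such as the intake temperature (reading type 0). A sketch under those assumptions, with an illustrative helper name and nvmlInit() already done:

#include <nvml.h>
#include <stdio.h>

/* List intake temperatures of all S-class units. */
static void print_unit_temperatures(void) {
    unsigned int count = 0;
    if (nvmlUnitGetCount(&count) != NVML_SUCCESS)
        return;
    for (unsigned int i = 0; i < count; ++i) {
        nvmlUnit_t unit;
        unsigned int temp = 0;
        if (nvmlUnitGetHandleByIndex(i, &unit) != NVML_SUCCESS)
            continue;
        if (nvmlUnitGetTemperature(unit, 0 /* intake */, &temp) == NVML_SUCCESS)
            printf("unit %u intake: %u C\n", i, temp);
    }
}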
- * The HIC must be connected to an S-class system for it to be reported by this function. - * - * @param hwbcCount Size of hwbcEntries array - * @param hwbcEntries Array holding information about hwbc - * - * @return - * - \ref NVML_SUCCESS if \a hwbcCount and \a hwbcEntries have been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if either \a hwbcCount or \a hwbcEntries is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a hwbcCount indicates that the \a hwbcEntries array is too small - */ -nvmlReturn_t DECLDIR nvmlSystemGetHicVersion(unsigned int *hwbcCount, nvmlHwbcEntry_t *hwbcEntries); -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlDeviceQueries Device Queries - * This chapter describes that queries that NVML can perform against each device. - * In each case the device is identified with an nvmlDevice_t handle. This handle is obtained by - * calling one of \ref nvmlDeviceGetHandleByIndex(), \ref nvmlDeviceGetHandleBySerial(), - * \ref nvmlDeviceGetHandleByPciBusId(). or \ref nvmlDeviceGetHandleByUUID(). - * @{ - */ -/***************************************************************************************************/ - - /** - * Retrieves the number of compute devices in the system. A compute device is a single GPU. - * - * For all products. - * - * Note: New nvmlDeviceGetCount_v2 (default in NVML 5.319) returns count of all devices in the system - * even if nvmlDeviceGetHandleByIndex_v2 returns NVML_ERROR_NO_PERMISSION for such device. - * Update your code to handle this error, or use NVML 4.304 or older nvml header file. - * For backward binary compatibility reasons _v1 version of the API is still present in the shared - * library. - * Old _v1 version of nvmlDeviceGetCount doesn't count devices that NVML has no permission to talk to. - * - * @param deviceCount Reference in which to return the number of accessible devices - * - * @return - * - \ref NVML_SUCCESS if \a deviceCount has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a deviceCount is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetCount(unsigned int *deviceCount); - -/** - * Acquire the handle for a particular device, based on its index. - * - * For all products. - * - * Valid indices are derived from the \a accessibleDevices count returned by - * \ref nvmlDeviceGetCount(). For example, if \a accessibleDevices is 2 the valid indices - * are 0 and 1, corresponding to GPU 0 and GPU 1. - * - * The order in which NVML enumerates devices has no guarantees of consistency between reboots. For that reason it - * is recommended that devices be looked up by their PCI ids or UUID. See - * \ref nvmlDeviceGetHandleByUUID() and \ref nvmlDeviceGetHandleByPciBusId(). - * - * Note: The NVML index may not correlate with other APIs, such as the CUDA device index. - * - * Starting from NVML 5, this API causes NVML to initialize the target GPU - * NVML may initialize additional GPUs if: - * - The target GPU is an SLI slave - * - * Note: New nvmlDeviceGetCount_v2 (default in NVML 5.319) returns count of all devices in the system - * even if nvmlDeviceGetHandleByIndex_v2 returns NVML_ERROR_NO_PERMISSION for such device. - * Update your code to handle this error, or use NVML 4.304 or older nvml header file. 
- * For backward binary compatibility reasons _v1 version of the API is still present in the shared - * library. - * Old _v1 version of nvmlDeviceGetCount doesn't count devices that NVML has no permission to talk to. - * - * This means that nvmlDeviceGetHandleByIndex_v2 and _v1 can return different devices for the same index. - * If you don't touch macros that map old (_v1) versions to _v2 versions at the top of the file you don't - * need to worry about that. - * - * @param index The index of the target GPU, >= 0 and < \a accessibleDevices - * @param device Reference in which to return the device handle - * - * @return - * - \ref NVML_SUCCESS if \a device has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a device is NULL - * - \ref NVML_ERROR_INSUFFICIENT_POWER if any attached devices have improperly attached external power cables - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to talk to this device - * - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetIndex - * @see nvmlDeviceGetCount - */ -nvmlReturn_t DECLDIR nvmlDeviceGetHandleByIndex(unsigned int index, nvmlDevice_t *device); - -/** - * Acquire the handle for a particular device, based on its board serial number. - * - * For Fermi &tm; or newer fully supported devices. - * - * This number corresponds to the value printed directly on the board, and to the value returned by - * \ref nvmlDeviceGetSerial(). - * - * @deprecated Since more than one GPU can exist on a single board this function is deprecated in favor - * of \ref nvmlDeviceGetHandleByUUID. - * For dual GPU boards this function will return NVML_ERROR_INVALID_ARGUMENT. - * - * Starting from NVML 5, this API causes NVML to initialize the target GPU - * NVML may initialize additional GPUs as it searches for the target GPU - * - * @param serial The board serial number of the target GPU - * @param device Reference in which to return the device handle - * - * @return - * - \ref NVML_SUCCESS if \a device has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a serial is invalid, \a device is NULL or more than one - * device has the same serial (dual GPU boards) - * - \ref NVML_ERROR_NOT_FOUND if \a serial does not match a valid device on the system - * - \ref NVML_ERROR_INSUFFICIENT_POWER if any attached devices have improperly attached external power cables - * - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs - * - \ref NVML_ERROR_GPU_IS_LOST if any GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetSerial - * @see nvmlDeviceGetHandleByUUID - */ -nvmlReturn_t DECLDIR nvmlDeviceGetHandleBySerial(const char *serial, nvmlDevice_t *device); - -/** - * Acquire the handle for a particular device, based on its globally unique immutable UUID associated with each device. - * - * For all products. 
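The device-count and handle-by-index calls above are the usual enumeration path; per the _v2 note, an index can be visible while nvmlDeviceGetHandleByIndex still returns NVML_ERROR_NO_PERMISSION, so the loop should tolerate per-device failures. A sketch, assuming NVML is already initialized (helper name illustrative):

#include <nvml.h>
#include <stdio.h>

/* Enumerate all visible GPUs, skipping those the caller may not access. */
static void enumerate_devices(void) {
    unsigned int count = 0;
    if (nvmlDeviceGetCount(&count) != NVML_SUCCESS)
        return;
    for (unsigned int i = 0; i < count; ++i) {
        nvmlDevice_t dev;
        nvmlReturn_t st = nvmlDeviceGetHandleByIndex(i, &dev);
        if (st == NVML_ERROR_NO_PERMISSION) {
            fprintf(stderr, "GPU %u: no permission, skipping\n", i);
            continue;
        }
        if (st != NVML_SUCCESS) {
            fprintf(stderr, "GPU %u: %s\n", i, nvmlErrorString(st));
            continue;
        }
        printf("GPU %u: handle acquired\n", i);
    }
}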
- * - * @param uuid The UUID of the target GPU - * @param device Reference in which to return the device handle - * - * Starting from NVML 5, this API causes NVML to initialize the target GPU - * NVML may initialize additional GPUs as it searches for the target GPU - * - * @return - * - \ref NVML_SUCCESS if \a device has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a uuid is invalid or \a device is null - * - \ref NVML_ERROR_NOT_FOUND if \a uuid does not match a valid device on the system - * - \ref NVML_ERROR_INSUFFICIENT_POWER if any attached devices have improperly attached external power cables - * - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs - * - \ref NVML_ERROR_GPU_IS_LOST if any GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetUUID - */ -nvmlReturn_t DECLDIR nvmlDeviceGetHandleByUUID(const char *uuid, nvmlDevice_t *device); - -/** - * Acquire the handle for a particular device, based on its PCI bus id. - * - * For all products. - * - * This value corresponds to the nvmlPciInfo_t::busId returned by \ref nvmlDeviceGetPciInfo(). - * - * Starting from NVML 5, this API causes NVML to initialize the target GPU - * NVML may initialize additional GPUs if: - * - The target GPU is an SLI slave - * - * \note NVML 4.304 and older version of nvmlDeviceGetHandleByPciBusId"_v1" returns NVML_ERROR_NOT_FOUND - * instead of NVML_ERROR_NO_PERMISSION. - * - * @param pciBusId The PCI bus id of the target GPU - * @param device Reference in which to return the device handle - * - * @return - * - \ref NVML_SUCCESS if \a device has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pciBusId is invalid or \a device is NULL - * - \ref NVML_ERROR_NOT_FOUND if \a pciBusId does not match a valid device on the system - * - \ref NVML_ERROR_INSUFFICIENT_POWER if the attached device has improperly attached external power cables - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to talk to this device - * - \ref NVML_ERROR_IRQ_ISSUE if NVIDIA kernel detected an interrupt issue with the attached GPUs - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetHandleByPciBusId(const char *pciBusId, nvmlDevice_t *device); - -/** - * Retrieves the name of this device. - * - * For all products. - * - * The name is an alphanumeric string that denotes a particular product, e.g. Tesla &tm; C2070. It will not - * exceed 64 characters in length (including the NULL terminator). See \ref - * nvmlConstants::NVML_DEVICE_NAME_BUFFER_SIZE. 
- * - * @param device The identifier of the target device - * @param name Reference in which to return the product name - * @param length The maximum allowed length of the string returned in \a name - * - * @return - * - \ref NVML_SUCCESS if \a name has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a name is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetName(nvmlDevice_t device, char *name, unsigned int length); - -/** - * Retrieves the brand of this device. - * - * For all products. - * - * The type is a member of \ref nvmlBrandType_t defined above. - * - * @param device The identifier of the target device - * @param type Reference in which to return the product brand type - * - * @return - * - \ref NVML_SUCCESS if \a name has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a type is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetBrand(nvmlDevice_t device, nvmlBrandType_t *type); - -/** - * Retrieves the NVML index of this device. - * - * For all products. - * - * Valid indices are derived from the \a accessibleDevices count returned by - * \ref nvmlDeviceGetCount(). For example, if \a accessibleDevices is 2 the valid indices - * are 0 and 1, corresponding to GPU 0 and GPU 1. - * - * The order in which NVML enumerates devices has no guarantees of consistency between reboots. For that reason it - * is recommended that devices be looked up by their PCI ids or GPU UUID. See - * \ref nvmlDeviceGetHandleByPciBusId() and \ref nvmlDeviceGetHandleByUUID(). - * - * Note: The NVML index may not correlate with other APIs, such as the CUDA device index. - * - * @param device The identifier of the target device - * @param index Reference in which to return the NVML index of the device - * - * @return - * - \ref NVML_SUCCESS if \a index has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a index is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetHandleByIndex() - * @see nvmlDeviceGetCount() - */ -nvmlReturn_t DECLDIR nvmlDeviceGetIndex(nvmlDevice_t device, unsigned int *index); - -/** - * Retrieves the globally unique board serial number associated with this device's board. - * - * For all products with an inforom. - * - * The serial number is an alphanumeric string that will not exceed 30 characters (including the NULL terminator). - * This number matches the serial number tag that is physically attached to the board. See \ref - * nvmlConstants::NVML_DEVICE_SERIAL_BUFFER_SIZE. 
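Because enumeration order is not stable across reboots, the documentation above recommends addressing a board by PCI bus id or UUID and then reading its stable properties. A sketch of that lookup, with the bus id string and helper name purely illustrative:

#include <nvml.h>
#include <stdio.h>

/* Look up one GPU by PCI bus id and print its name and NVML index. */
static void describe_gpu_by_busid(const char *busid) {
    nvmlDevice_t dev;
    char name[NVML_DEVICE_NAME_BUFFER_SIZE];
    unsigned int index = 0;

    if (nvmlDeviceGetHandleByPciBusId(busid, &dev) != NVML_SUCCESS)
        return;
    if (nvmlDeviceGetName(dev, name, sizeof(name)) == NVML_SUCCESS &&
        nvmlDeviceGetIndex(dev, &index) == NVML_SUCCESS)
        printf("%s -> index %u, name \"%s\"\n", busid, index, name);
}

/* Example call (the bus id is made up): describe_gpu_by_busid("0000:65:00.0"); */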
- * - * @param device The identifier of the target device - * @param serial Reference in which to return the board/module serial number - * @param length The maximum allowed length of the string returned in \a serial - * - * @return - * - \ref NVML_SUCCESS if \a serial has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a serial is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetSerial(nvmlDevice_t device, char *serial, unsigned int length); - -/** - * Retrieves an array of unsigned ints (sized to cpuSetSize) of bitmasks with the ideal CPU affinity for the device - * For example, if processors 0, 1, 32, and 33 are ideal for the device and cpuSetSize == 2, - * result[0] = 0x3, result[1] = 0x3 - * - * For Kepler &tm; or newer fully supported devices. - * Supported on Linux only. - * - * @param device The identifier of the target device - * @param cpuSetSize The size of the cpuSet array that is safe to access - * @param cpuSet Array reference in which to return a bitmask of CPUs, 64 CPUs per - * unsigned long on 64-bit machines, 32 on 32-bit machines - * - * @return - * - \ref NVML_SUCCESS if \a cpuAffinity has been filled - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, cpuSetSize == 0, or cpuSet is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetCpuAffinity(nvmlDevice_t device, unsigned int cpuSetSize, unsigned long *cpuSet); - -/** - * Sets the ideal affinity for the calling thread and device using the guidelines - * given in nvmlDeviceGetCpuAffinity(). Note, this is a change as of version 8.0. - * Older versions set the affinity for a calling process and all children. - * Currently supports up to 64 processors. - * - * For Kepler &tm; or newer fully supported devices. - * Supported on Linux only. - * - * @param device The identifier of the target device - * - * @return - * - \ref NVML_SUCCESS if the calling process has been successfully bound - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceSetCpuAffinity(nvmlDevice_t device); - -/** - * Clear all affinity bindings for the calling thread. Note, this is a change as of version - * 8.0 as older versions cleared the affinity for a calling process and all children. - * - * For Kepler &tm; or newer fully supported devices. - * Supported on Linux only. 
- * - * @param device The identifier of the target device - * - * @return - * - \ref NVML_SUCCESS if the calling process has been successfully unbound - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceClearCpuAffinity(nvmlDevice_t device); - -/** - * Retrieve the common ancestor for two devices - * For all products. - * Supported on Linux only. - * - * @param device1 The identifier of the first device - * @param device2 The identifier of the second device - * @param pathInfo A \ref nvmlGpuTopologyLevel_t that gives the path type - * - * @return - * - \ref NVML_SUCCESS if \a pathInfo has been set - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device1, or \a device2 is invalid, or \a pathInfo is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature - * - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery - */ -nvmlReturn_t DECLDIR nvmlDeviceGetTopologyCommonAncestor(nvmlDevice_t device1, nvmlDevice_t device2, nvmlGpuTopologyLevel_t *pathInfo); - -/** - * Retrieve the set of GPUs that are nearest to a given device at a specific interconnectivity level - * For all products. - * Supported on Linux only. - * - * @param device The identifier of the first device - * @param level The \ref nvmlGpuTopologyLevel_t level to search for other GPUs - * @param count When zero, is set to the number of matching GPUs such that \a deviceArray - * can be malloc'd. When non-zero, \a deviceArray will be filled with \a count - * number of device handles. - * @param deviceArray An array of device handles for GPUs found at \a level - * - * @return - * - \ref NVML_SUCCESS if \a deviceArray or \a count (if initially zero) has been set - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a level, or \a count is invalid, or \a deviceArray is NULL with a non-zero \a count - * - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature - * - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery - */ -nvmlReturn_t DECLDIR nvmlDeviceGetTopologyNearestGpus(nvmlDevice_t device, nvmlGpuTopologyLevel_t level, unsigned int *count, nvmlDevice_t *deviceArray); - -/** - * Retrieve the set of GPUs that have a CPU affinity with the given CPU number - * For all products. - * Supported on Linux only. - * - * @param cpuNumber The CPU number - * @param count When zero, is set to the number of matching GPUs such that \a deviceArray - * can be malloc'd. When non-zero, \a deviceArray will be filled with \a count - * number of device handles. 
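nvmlDeviceGetCpuAffinity(), nvmlDeviceSetCpuAffinity() and nvmlDeviceClearCpuAffinity() are typically used as a bracket around latency-sensitive work on Linux: pin the calling thread near the GPU, do the work, then unpin. A sketch assuming a valid device handle (the helper and the work callback are illustrative):

#include <nvml.h>
#include <stdio.h>

/* Pin the calling thread to the CPUs nearest to `dev`, run work, then unpin (Linux only). */
static void run_near_gpu(nvmlDevice_t dev, void (*work)(void)) {
    unsigned long cpuset[2] = {0, 0};   /* room for 128 CPUs on a 64-bit machine */

    if (nvmlDeviceGetCpuAffinity(dev, 2, cpuset) == NVML_SUCCESS)
        printf("ideal CPU mask: 0x%lx 0x%lx\n", cpuset[0], cpuset[1]);

    if (nvmlDeviceSetCpuAffinity(dev) == NVML_SUCCESS) {
        work();
        nvmlDeviceClearCpuAffinity(dev);
    } else {
        work();                          /* fall back to the current affinity */
    }
}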
- * @param deviceArray An array of device handles for GPUs found with affinity to \a cpuNumber - * - * @return - * - \ref NVML_SUCCESS if \a deviceArray or \a count (if initially zero) has been set - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a cpuNumber, or \a count is invalid, or \a deviceArray is NULL with a non-zero \a count - * - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature - * - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery - */ -nvmlReturn_t DECLDIR nvmlSystemGetTopologyGpuSet(unsigned int cpuNumber, unsigned int *count, nvmlDevice_t *deviceArray); - -/** - * Retrieve the status for a given p2p capability index between a given pair of GPU - * - * @param device1 The first device - * @param device2 The second device - * @param p2pIndex p2p Capability Index being looked for between \a device1 and \a device2 - * @param p2pStatus Reference in which to return the status of the \a p2pIndex - * between \a device1 and \a device2 - * @return - * - \ref NVML_SUCCESS if \a p2pStatus has been populated - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device1 or \a device2 or \a p2pIndex is invalid or \a p2pStatus is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetP2PStatus(nvmlDevice_t device1, nvmlDevice_t device2, nvmlGpuP2PCapsIndex_t p2pIndex,nvmlGpuP2PStatus_t *p2pStatus); - -/** - * Retrieves the globally unique immutable UUID associated with this device, as a 5 part hexadecimal string, - * that augments the immutable, board serial identifier. - * - * For all products. - * - * The UUID is a globally unique identifier. It is the only available identifier for pre-Fermi-architecture products. - * It does NOT correspond to any identifier printed on the board. It will not exceed 80 characters in length - * (including the NULL terminator). See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE. - * - * @param device The identifier of the target device - * @param uuid Reference in which to return the GPU UUID - * @param length The maximum allowed length of the string returned in \a uuid - * - * @return - * - \ref NVML_SUCCESS if \a uuid has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a uuid is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetUUID(nvmlDevice_t device, char *uuid, unsigned int length); - -/** - * Retrieves minor number for the device. The minor number for the device is such that the Nvidia device node file for - * each GPU will have the form /dev/nvidia[minor number]. - * - * For all products. 
- * Supported only for Linux - * - * @param device The identifier of the target device - * @param minorNumber Reference in which to return the minor number for the device - * @return - * - \ref NVML_SUCCESS if the minor number is successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a minorNumber is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetMinorNumber(nvmlDevice_t device, unsigned int *minorNumber); - -/** - * Retrieves the the device board part number which is programmed into the board's InfoROM - * - * For all products. - * - * @param device Identifier of the target device - * @param partNumber Reference to the buffer to return - * @param length Length of the buffer reference - * - * @return - * - \ref NVML_SUCCESS if \a partNumber has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_NOT_SUPPORTED if the needed VBIOS fields have not been filled - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a serial is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetBoardPartNumber(nvmlDevice_t device, char* partNumber, unsigned int length); - -/** - * Retrieves the version information for the device's infoROM object. - * - * For all products with an inforom. - * - * Fermi and higher parts have non-volatile on-board memory for persisting device info, such as aggregate - * ECC counts. The version of the data structures in this memory may change from time to time. It will not - * exceed 16 characters in length (including the NULL terminator). - * See \ref nvmlConstants::NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE. - * - * See \ref nvmlInforomObject_t for details on the available infoROM objects. - * - * @param device The identifier of the target device - * @param object The target infoROM object - * @param version Reference in which to return the infoROM version - * @param length The maximum allowed length of the string returned in \a version - * - * @return - * - \ref NVML_SUCCESS if \a version has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have an infoROM - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetInforomImageVersion - */ -nvmlReturn_t DECLDIR nvmlDeviceGetInforomVersion(nvmlDevice_t device, nvmlInforomObject_t object, char *version, unsigned int length); - -/** - * Retrieves the global infoROM image version - * - * For all products with an inforom. - * - * Image version just like VBIOS version uniquely describes the exact version of the infoROM flashed on the board - * in contrast to infoROM object version which is only an indicator of supported features. 
- * Version string will not exceed 16 characters in length (including the NULL terminator). - * See \ref nvmlConstants::NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE. - * - * @param device The identifier of the target device - * @param version Reference in which to return the infoROM image version - * @param length The maximum allowed length of the string returned in \a version - * - * @return - * - \ref NVML_SUCCESS if \a version has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a version is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have an infoROM - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetInforomVersion - */ -nvmlReturn_t DECLDIR nvmlDeviceGetInforomImageVersion(nvmlDevice_t device, char *version, unsigned int length); - -/** - * Retrieves the checksum of the configuration stored in the device's infoROM. - * - * For all products with an inforom. - * - * Can be used to make sure that two GPUs have the exact same configuration. - * Current checksum takes into account configuration stored in PWR and ECC infoROM objects. - * Checksum can change between driver releases or when user changes configuration (e.g. disable/enable ECC) - * - * @param device The identifier of the target device - * @param checksum Reference in which to return the infoROM configuration checksum - * - * @return - * - \ref NVML_SUCCESS if \a checksum has been set - * - \ref NVML_ERROR_CORRUPTED_INFOROM if the device's checksum couldn't be retrieved due to infoROM corruption - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a checksum is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetInforomConfigurationChecksum(nvmlDevice_t device, unsigned int *checksum); - -/** - * Reads the infoROM from the flash and verifies the checksums. - * - * For all products with an inforom. - * - * @param device The identifier of the target device - * - * @return - * - \ref NVML_SUCCESS if infoROM is not corrupted - * - \ref NVML_ERROR_CORRUPTED_INFOROM if the device's infoROM is corrupted - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceValidateInforom(nvmlDevice_t device); - -/** - * Retrieves the display mode for the device. - * - * For all products. - * - * This method indicates whether a physical display (e.g. monitor) is currently connected to - * any of the device's connectors. - * - * See \ref nvmlEnableState_t for details on allowed modes. 
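The infoROM queries above are commonly paired: read the image version for reporting, then validate the checksums to detect corruption. A sketch assuming a valid device handle (helper name illustrative):

#include <nvml.h>
#include <stdio.h>

/* Report the infoROM image version and verify its checksums. */
static void check_inforom(nvmlDevice_t dev) {
    char version[NVML_DEVICE_INFOROM_VERSION_BUFFER_SIZE];
    nvmlReturn_t st;

    if (nvmlDeviceGetInforomImageVersion(dev, version, sizeof(version)) == NVML_SUCCESS)
        printf("infoROM image: %s\n", version);

    st = nvmlDeviceValidateInforom(dev);
    if (st == NVML_ERROR_CORRUPTED_INFOROM)
        fprintf(stderr, "infoROM is corrupted\n");
    else if (st != NVML_SUCCESS && st != NVML_ERROR_NOT_SUPPORTED)
        fprintf(stderr, "infoROM validation failed: %s\n", nvmlErrorString(st));
}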
- * - * @param device The identifier of the target device - * @param display Reference in which to return the display mode - * - * @return - * - \ref NVML_SUCCESS if \a display has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a display is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetDisplayMode(nvmlDevice_t device, nvmlEnableState_t *display); - -/** - * Retrieves the display active state for the device. - * - * For all products. - * - * This method indicates whether a display is initialized on the device. - * For example whether X Server is attached to this device and has allocated memory for the screen. - * - * Display can be active even when no monitor is physically attached. - * - * See \ref nvmlEnableState_t for details on allowed modes. - * - * @param device The identifier of the target device - * @param isActive Reference in which to return the display active state - * - * @return - * - \ref NVML_SUCCESS if \a isActive has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isActive is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetDisplayActive(nvmlDevice_t device, nvmlEnableState_t *isActive); - -/** - * Retrieves the persistence mode associated with this device. - * - * For all products. - * For Linux only. - * - * When driver persistence mode is enabled the driver software state is not torn down when the last - * client disconnects. By default this feature is disabled. - * - * See \ref nvmlEnableState_t for details on allowed modes. - * - * @param device The identifier of the target device - * @param mode Reference in which to return the current driver persistence mode - * - * @return - * - \ref NVML_SUCCESS if \a mode has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceSetPersistenceMode() - */ -nvmlReturn_t DECLDIR nvmlDeviceGetPersistenceMode(nvmlDevice_t device, nvmlEnableState_t *mode); - -/** - * Retrieves the PCI attributes of this device. - * - * For all products. - * - * See \ref nvmlPciInfo_t for details on the available PCI info. 
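The three nvmlEnableState_t getters above answer different questions: whether a monitor is physically connected, whether a display stack is initialized, and whether driver persistence is on. A sketch, assuming the NVML_FEATURE_ENABLED enumerator defined earlier in this header and a valid device handle:

#include <nvml.h>
#include <stdio.h>

/* Summarize display and persistence state for one device (persistence is Linux only). */
static void print_display_state(nvmlDevice_t dev) {
    nvmlEnableState_t mode;

    if (nvmlDeviceGetDisplayMode(dev, &mode) == NVML_SUCCESS)
        printf("monitor connected:   %s\n", mode == NVML_FEATURE_ENABLED ? "yes" : "no");
    if (nvmlDeviceGetDisplayActive(dev, &mode) == NVML_SUCCESS)
        printf("display initialized: %s\n", mode == NVML_FEATURE_ENABLED ? "yes" : "no");
    if (nvmlDeviceGetPersistenceMode(dev, &mode) == NVML_SUCCESS)
        printf("persistence mode:    %s\n", mode == NVML_FEATURE_ENABLED ? "on" : "off");
}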
- * - * @param device The identifier of the target device - * @param pci Reference in which to return the PCI info - * - * @return - * - \ref NVML_SUCCESS if \a pci has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pci is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetPciInfo(nvmlDevice_t device, nvmlPciInfo_t *pci); - -/** - * Retrieves the maximum PCIe link generation possible with this device and system - * - * I.E. for a generation 2 PCIe device attached to a generation 1 PCIe bus the max link generation this function will - * report is generation 1. - * - * For Fermi &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param maxLinkGen Reference in which to return the max PCIe link generation - * - * @return - * - \ref NVML_SUCCESS if \a maxLinkGen has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a maxLinkGen is null - * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetMaxPcieLinkGeneration(nvmlDevice_t device, unsigned int *maxLinkGen); - -/** - * Retrieves the maximum PCIe link width possible with this device and system - * - * I.E. for a device with a 16x PCIe bus width attached to a 8x PCIe system bus this function will report - * a max link width of 8. - * - * For Fermi &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param maxLinkWidth Reference in which to return the max PCIe link generation - * - * @return - * - \ref NVML_SUCCESS if \a maxLinkWidth has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a maxLinkWidth is null - * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetMaxPcieLinkWidth(nvmlDevice_t device, unsigned int *maxLinkWidth); - -/** - * Retrieves the current PCIe link generation - * - * For Fermi &tm; or newer fully supported devices. 
- * - * @param device The identifier of the target device - * @param currLinkGen Reference in which to return the current PCIe link generation - * - * @return - * - \ref NVML_SUCCESS if \a currLinkGen has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a currLinkGen is null - * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetCurrPcieLinkGeneration(nvmlDevice_t device, unsigned int *currLinkGen); - -/** - * Retrieves the current PCIe link width - * - * For Fermi &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param currLinkWidth Reference in which to return the current PCIe link generation - * - * @return - * - \ref NVML_SUCCESS if \a currLinkWidth has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a currLinkWidth is null - * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetCurrPcieLinkWidth(nvmlDevice_t device, unsigned int *currLinkWidth); - -/** - * Retrieve PCIe utilization information. - * This function is querying a byte counter over a 20ms interval and thus is the - * PCIe throughput over that interval. - * - * For Maxwell &tm; or newer fully supported devices. - * - * This method is not supported in virtual machines running virtual GPU (vGPU). - * - * @param device The identifier of the target device - * @param counter The specific counter that should be queried \ref nvmlPcieUtilCounter_t - * @param value Reference in which to return throughput in KB/s - * - * @return - * - \ref NVML_SUCCESS if \a value has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a counter is invalid, or \a value is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetPcieThroughput(nvmlDevice_t device, nvmlPcieUtilCounter_t counter, unsigned int *value); - -/** - * Retrieve the PCIe replay counter. - * - * For Kepler &tm; or newer fully supported devices. 
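The PCIe getters above compose into a simple link report: current versus maximum generation and width, plus the 20 ms TX/RX throughput counters. A sketch, assuming the nvmlPcieUtilCounter_t enumerators defined earlier in this header and a valid device handle:

#include <nvml.h>
#include <stdio.h>

/* Print PCIe link configuration and instantaneous throughput for one device. */
static void print_pcie_info(nvmlDevice_t dev) {
    unsigned int curGen, curWidth, maxGen, maxWidth, tx, rx;

    if (nvmlDeviceGetCurrPcieLinkGeneration(dev, &curGen) == NVML_SUCCESS &&
        nvmlDeviceGetCurrPcieLinkWidth(dev, &curWidth) == NVML_SUCCESS &&
        nvmlDeviceGetMaxPcieLinkGeneration(dev, &maxGen) == NVML_SUCCESS &&
        nvmlDeviceGetMaxPcieLinkWidth(dev, &maxWidth) == NVML_SUCCESS)
        printf("PCIe: gen%u x%u (max gen%u x%u)\n", curGen, curWidth, maxGen, maxWidth);

    if (nvmlDeviceGetPcieThroughput(dev, NVML_PCIE_UTIL_TX_BYTES, &tx) == NVML_SUCCESS &&
        nvmlDeviceGetPcieThroughput(dev, NVML_PCIE_UTIL_RX_BYTES, &rx) == NVML_SUCCESS)
        printf("PCIe throughput: %u KB/s TX, %u KB/s RX\n", tx, rx);
}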
- * - * @param device The identifier of the target device - * @param value Reference in which to return the counter's value - * - * @return - * - \ref NVML_SUCCESS if \a value has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a value is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetPcieReplayCounter(nvmlDevice_t device, unsigned int *value); - -/** - * Retrieves the current clock speeds for the device. - * - * For Fermi &tm; or newer fully supported devices. - * - * See \ref nvmlClockType_t for details on available clock information. - * - * @param device The identifier of the target device - * @param type Identify which clock domain to query - * @param clock Reference in which to return the clock speed in MHz - * - * @return - * - \ref NVML_SUCCESS if \a clock has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clock is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device cannot report the specified clock - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetClockInfo(nvmlDevice_t device, nvmlClockType_t type, unsigned int *clock); - -/** - * Retrieves the maximum clock speeds for the device. - * - * For Fermi &tm; or newer fully supported devices. - * - * See \ref nvmlClockType_t for details on available clock information. - * - * \note On GPUs from Fermi family current P0 clocks (reported by \ref nvmlDeviceGetClockInfo) can differ from max clocks - * by few MHz. - * - * @param device The identifier of the target device - * @param type Identify which clock domain to query - * @param clock Reference in which to return the clock speed in MHz - * - * @return - * - \ref NVML_SUCCESS if \a clock has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clock is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device cannot report the specified clock - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetMaxClockInfo(nvmlDevice_t device, nvmlClockType_t type, unsigned int *clock); - -/** - * Retrieves the current setting of a clock that applications will use unless an overspec situation occurs. - * Can be changed using \ref nvmlDeviceSetApplicationsClocks. - * - * For Kepler &tm; or newer fully supported devices. 
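
Illustrative usage (not part of the header): a minimal, self-contained sketch that prints the current and maximum SM clock of device 0 via the queries declared above. It assumes the NVML library is installed and a device exists at index 0; nvmlInit, nvmlDeviceGetHandleByIndex, nvmlErrorString and nvmlShutdown are the standard entry points from this same header.

/* Print the current and maximum SM clock of device 0.
 * Error handling is collapsed into one helper for brevity. */
#include <stdio.h>
#include <stdlib.h>
#include <nvml.h>

static void check(nvmlReturn_t r, const char *what)
{
    if (r != NVML_SUCCESS) {
        fprintf(stderr, "%s failed: %s\n", what, nvmlErrorString(r));
        exit(1);
    }
}

int main(void)
{
    nvmlDevice_t dev;
    unsigned int cur = 0, max = 0;

    check(nvmlInit(), "nvmlInit");
    check(nvmlDeviceGetHandleByIndex(0, &dev), "nvmlDeviceGetHandleByIndex");

    /* NVML_CLOCK_SM selects the SM clock domain; values are reported in MHz. */
    check(nvmlDeviceGetClockInfo(dev, NVML_CLOCK_SM, &cur), "nvmlDeviceGetClockInfo");
    check(nvmlDeviceGetMaxClockInfo(dev, NVML_CLOCK_SM, &max), "nvmlDeviceGetMaxClockInfo");

    printf("SM clock: %u MHz (max %u MHz)\n", cur, max);
    nvmlShutdown();
    return 0;
}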
- *
- * @param device The identifier of the target device
- * @param clockType Identify which clock domain to query
- * @param clockMHz Reference in which to return the clock in MHz
- *
- * @return
- * - \ref NVML_SUCCESS if \a clockMHz has been set
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid
- * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetApplicationsClock(nvmlDevice_t device, nvmlClockType_t clockType, unsigned int *clockMHz);
-
-/**
- * Retrieves the default applications clock that the GPU boots with or
- * defaults to after \ref nvmlDeviceResetApplicationsClocks call.
- *
- * For Kepler &tm; or newer fully supported devices.
- *
- * @param device The identifier of the target device
- * @param clockType Identify which clock domain to query
- * @param clockMHz Reference in which to return the default clock in MHz
- *
- * @return
- * - \ref NVML_SUCCESS if \a clockMHz has been set
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid
- * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- *
- * \see nvmlDeviceGetApplicationsClock
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetDefaultApplicationsClock(nvmlDevice_t device, nvmlClockType_t clockType, unsigned int *clockMHz);
-
-/**
- * Resets the application clock to the default value
- *
- * This is the applications clock that will be used after system reboot or driver reload.
- * The default value is constant, but the current value can be changed using \ref nvmlDeviceSetApplicationsClocks.
- *
- * On Pascal and newer hardware, if clocks were previously locked with \ref nvmlDeviceSetApplicationsClocks,
- * this call will unlock clocks. This returns clocks to their default behavior of automatically boosting above
- * base clocks as thermal limits allow.
- *
- * @see nvmlDeviceGetApplicationsClock
- * @see nvmlDeviceSetApplicationsClocks
- *
- * For Fermi &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices.
- *
- * @param device The identifier of the target device
- *
- * @return
- * - \ref NVML_SUCCESS if new settings were successfully set
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid
- * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceResetApplicationsClocks(nvmlDevice_t device);
-
-/**
- * Retrieves the clock speed for the clock specified by the clock type and clock ID.
- *
- * For Kepler &tm; or newer fully supported devices.
- * - * @param device The identifier of the target device - * @param clockType Identify which clock domain to query - * @param clockId Identify which clock in the domain to query - * @param clockMHz Reference in which to return the clock in MHz - * - * @return - * - \ref NVML_SUCCESS if \a clockMHz has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetClock(nvmlDevice_t device, nvmlClockType_t clockType, nvmlClockId_t clockId, unsigned int *clockMHz); - -/** - * Retrieves the customer defined maximum boost clock speed specified by the given clock type. - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param clockType Identify which clock domain to query - * @param clockMHz Reference in which to return the clock in MHz - * - * @return - * - \ref NVML_SUCCESS if \a clockMHz has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clockMHz is NULL or \a clockType is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device or the \a clockType on this device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetMaxCustomerBoostClock(nvmlDevice_t device, nvmlClockType_t clockType, unsigned int *clockMHz); - -/** - * Retrieves the list of possible memory clocks that can be used as an argument for \ref nvmlDeviceSetApplicationsClocks. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param count Reference in which to provide the \a clocksMHz array size, and - * to return the number of elements - * @param clocksMHz Reference in which to return the clock in MHz - * - * @return - * - \ref NVML_SUCCESS if \a count and \a clocksMHz have been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a count is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small (\a count is set to the number of - * required elements) - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceSetApplicationsClocks - * @see nvmlDeviceGetSupportedGraphicsClocks - */ -nvmlReturn_t DECLDIR nvmlDeviceGetSupportedMemoryClocks(nvmlDevice_t device, unsigned int *count, unsigned int *clocksMHz); - -/** - * Retrieves the list of possible graphics clocks that can be used as an argument for \ref nvmlDeviceSetApplicationsClocks. - * - * For Kepler &tm; or newer fully supported devices. 
- *
- * @param device The identifier of the target device
- * @param memoryClockMHz Memory clock for which to return possible graphics clocks
- * @param count Reference in which to provide the \a clocksMHz array size, and
- * to return the number of elements
- * @param clocksMHz Reference in which to return the clocks in MHz
- *
- * @return
- * - \ref NVML_SUCCESS if \a count and \a clocksMHz have been populated
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_NOT_FOUND if the specified \a memoryClockMHz is not a supported frequency
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clock is NULL
- * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
- * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- *
- * @see nvmlDeviceSetApplicationsClocks
- * @see nvmlDeviceGetSupportedMemoryClocks
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetSupportedGraphicsClocks(nvmlDevice_t device, unsigned int memoryClockMHz, unsigned int *count, unsigned int *clocksMHz);
-
-/**
- * Retrieve the current state of Auto Boosted clocks on a device and store it in \a isEnabled
- *
- * For Kepler &tm; or newer fully supported devices.
- *
- * Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates
- * to maximize performance as thermal limits allow.
- *
- * On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks.
- * Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost
- * behavior.
- *
- * @param device The identifier of the target device
- * @param isEnabled Where to store the current state of Auto Boosted clocks of the target device
- * @param defaultIsEnabled Where to store the default Auto Boosted clocks behavior of the target device that the device will
- * revert to when no applications are using the GPU
- *
- * @return
- * - \ref NVML_SUCCESS If \a isEnabled has been set with the Auto Boosted clocks state of \a device
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isEnabled is NULL
- * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- *
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t *isEnabled, nvmlEnableState_t *defaultIsEnabled);
-
-/**
- * Try to set the current state of Auto Boosted clocks on a device.
- *
- * For Kepler &tm; or newer fully supported devices.
- *
- * Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates
- * to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock
- * rates are desired.
- *
- * Non-root users may use this API by default but can be restricted by root from using this API by calling
- * \ref nvmlDeviceSetAPIRestriction with apiType=NVML_RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS.
- * Note: Persistence Mode is required to modify current Auto Boost settings, therefore, it must be enabled.
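
Illustrative usage (not part of the header): a sketch of the two-call sizing idiom that the \a count parameter implies, enumerating the memory clocks accepted by nvmlDeviceSetApplicationsClocks. Probing with count = 0 and a NULL array is an assumption about acceptable arguments; the documented NVML_ERROR_INSUFFICIENT_SIZE path sets count to the required number of elements. `dev` stands for a handle obtained as in the earlier clock sketch.

/* Enumerate the memory clocks usable with nvmlDeviceSetApplicationsClocks. */
#include <stdio.h>
#include <stdlib.h>
#include <nvml.h>

void print_supported_memory_clocks(nvmlDevice_t dev)
{
    unsigned int count = 0;

    /* Probe call: expected to fail with INSUFFICIENT_SIZE and report the
     * required element count (passing NULL here is an assumption). */
    nvmlReturn_t r = nvmlDeviceGetSupportedMemoryClocks(dev, &count, NULL);
    if (r != NVML_SUCCESS && r != NVML_ERROR_INSUFFICIENT_SIZE) {
        fprintf(stderr, "query failed: %s\n", nvmlErrorString(r));
        return;
    }

    unsigned int *clocks = malloc(count * sizeof *clocks);
    if (!clocks)
        return;

    if (nvmlDeviceGetSupportedMemoryClocks(dev, &count, clocks) == NVML_SUCCESS)
        for (unsigned int i = 0; i < count; ++i)
            printf("supported memory clock: %u MHz\n", clocks[i]);
    free(clocks);
}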
- * - * On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks. - * Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost - * behavior. - * - * @param device The identifier of the target device - * @param enabled What state to try to set Auto Boosted clocks of the target device to - * - * @return - * - \ref NVML_SUCCESS If the Auto Boosted clocks were successfully set to the state specified by \a enabled - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - */ -nvmlReturn_t DECLDIR nvmlDeviceSetAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t enabled); - -/** - * Try to set the default state of Auto Boosted clocks on a device. This is the default state that Auto Boosted clocks will - * return to when no compute running processes (e.g. CUDA application which have an active context) are running - * - * For Kepler &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices. - * Requires root/admin permissions. - * - * Auto Boosted clocks are enabled by default on some hardware, allowing the GPU to run at higher clock rates - * to maximize performance as thermal limits allow. Auto Boosted clocks should be disabled if fixed clock - * rates are desired. - * - * On Pascal and newer hardware, Auto Boosted clocks are controlled through application clocks. - * Use \ref nvmlDeviceSetApplicationsClocks and \ref nvmlDeviceResetApplicationsClocks to control Auto Boost - * behavior. - * - * @param device The identifier of the target device - * @param enabled What state to try to set default Auto Boosted clocks of the target device to - * @param flags Flags that change the default behavior. Currently Unused. - * - * @return - * - \ref NVML_SUCCESS If the Auto Boosted clock's default state was successfully set to the state specified by \a enabled - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_NO_PERMISSION If the calling user does not have permission to change Auto Boosted clock's default state. - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support Auto Boosted clocks - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - */ -nvmlReturn_t DECLDIR nvmlDeviceSetDefaultAutoBoostedClocksEnabled(nvmlDevice_t device, nvmlEnableState_t enabled, unsigned int flags); - - -/** - * Retrieves the intended operating speed of the device's fan. - * - * Note: The reported speed is the intended fan speed. If the fan is physically blocked and unable to spin, the - * output will not match the actual fan speed. - * - * For all discrete products with dedicated fans. - * - * The fan speed is expressed as a percent of the maximum, i.e. full speed is 100%. 
- * - * @param device The identifier of the target device - * @param speed Reference in which to return the fan speed percentage - * - * @return - * - \ref NVML_SUCCESS if \a speed has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a speed is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a fan - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetFanSpeed(nvmlDevice_t device, unsigned int *speed); - -/** - * Retrieves the current temperature readings for the device, in degrees C. - * - * For all products. - * - * See \ref nvmlTemperatureSensors_t for details on available temperature sensors. - * - * @param device The identifier of the target device - * @param sensorType Flag that indicates which sensor reading to retrieve - * @param temp Reference in which to return the temperature reading - * - * @return - * - \ref NVML_SUCCESS if \a temp has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a sensorType is invalid or \a temp is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have the specified sensor - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetTemperature(nvmlDevice_t device, nvmlTemperatureSensors_t sensorType, unsigned int *temp); - -/** - * Retrieves the temperature threshold for the GPU with the specified threshold type in degrees C. - * - * For Kepler &tm; or newer fully supported devices. - * - * See \ref nvmlTemperatureThresholds_t for details on available temperature thresholds. - * - * @param device The identifier of the target device - * @param thresholdType The type of threshold value queried - * @param temp Reference in which to return the temperature reading - * @return - * - \ref NVML_SUCCESS if \a temp has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a thresholdType is invalid or \a temp is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a temperature sensor or is unsupported - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetTemperatureThreshold(nvmlDevice_t device, nvmlTemperatureThresholds_t thresholdType, unsigned int *temp); - -/** - * Retrieves the current performance state for the device. - * - * For Fermi &tm; or newer fully supported devices. - * - * See \ref nvmlPstates_t for details on allowed performance states. 
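
Illustrative usage (not part of the header): a small helper that reads the GPU core temperature and the slowdown threshold declared above. `dev` is assumed to be an initialized device handle obtained as in the first sketch; NVML_TEMPERATURE_GPU and NVML_TEMPERATURE_THRESHOLD_SLOWDOWN are the corresponding enum values from this header.

/* Report the GPU temperature and the throttling (slowdown) threshold, in degrees C. */
#include <stdio.h>
#include <nvml.h>

void print_thermals(nvmlDevice_t dev)
{
    unsigned int temp = 0, slowdown = 0;

    if (nvmlDeviceGetTemperature(dev, NVML_TEMPERATURE_GPU, &temp) == NVML_SUCCESS)
        printf("GPU temperature: %u C\n", temp);

    /* The slowdown threshold is the temperature at which clocks start throttling. */
    if (nvmlDeviceGetTemperatureThreshold(dev, NVML_TEMPERATURE_THRESHOLD_SLOWDOWN,
                                          &slowdown) == NVML_SUCCESS)
        printf("slowdown threshold: %u C\n", slowdown);
}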
- * - * @param device The identifier of the target device - * @param pState Reference in which to return the performance state reading - * - * @return - * - \ref NVML_SUCCESS if \a pState has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pState is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetPerformanceState(nvmlDevice_t device, nvmlPstates_t *pState); - -/** - * Retrieves current clocks throttling reasons. - * - * For all fully supported products. - * - * \note More than one bit can be enabled at the same time. Multiple reasons can be affecting clocks at once. - * - * @param device The identifier of the target device - * @param clocksThrottleReasons Reference in which to return bitmask of active clocks throttle - * reasons - * - * @return - * - \ref NVML_SUCCESS if \a clocksThrottleReasons has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a clocksThrottleReasons is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlClocksThrottleReasons - * @see nvmlDeviceGetSupportedClocksThrottleReasons - */ -nvmlReturn_t DECLDIR nvmlDeviceGetCurrentClocksThrottleReasons(nvmlDevice_t device, unsigned long long *clocksThrottleReasons); - -/** - * Retrieves bitmask of supported clocks throttle reasons that can be returned by - * \ref nvmlDeviceGetCurrentClocksThrottleReasons - * - * For all fully supported products. - * - * This method is not supported in virtual machines running virtual GPU (vGPU). - * - * @param device The identifier of the target device - * @param supportedClocksThrottleReasons Reference in which to return bitmask of supported - * clocks throttle reasons - * - * @return - * - \ref NVML_SUCCESS if \a supportedClocksThrottleReasons has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a supportedClocksThrottleReasons is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlClocksThrottleReasons - * @see nvmlDeviceGetCurrentClocksThrottleReasons - */ -nvmlReturn_t DECLDIR nvmlDeviceGetSupportedClocksThrottleReasons(nvmlDevice_t device, unsigned long long *supportedClocksThrottleReasons); - -/** - * Deprecated: Use \ref nvmlDeviceGetPerformanceState. This function exposes an incorrect generalization. - * - * Retrieve the current performance state for the device. - * - * For Fermi &tm; or newer fully supported devices. - * - * See \ref nvmlPstates_t for details on allowed performance states. 
- *
- * @param device The identifier of the target device
- * @param pState Reference in which to return the performance state reading
- *
- * @return
- * - \ref NVML_SUCCESS if \a pState has been set
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pState is NULL
- * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetPowerState(nvmlDevice_t device, nvmlPstates_t *pState);
-
-/**
- * This API has been deprecated.
- *
- * Retrieves the power management mode associated with this device.
- *
- * For products from the Fermi family.
- * - Requires \a NVML_INFOROM_POWER version 3.0 or higher.
- *
- * For devices from the Kepler or newer families.
- * - Does not require \a NVML_INFOROM_POWER object.
- *
- * This flag indicates whether any power management algorithm is currently active on the device. An
- * enabled state does not necessarily mean the device is being actively throttled -- only that
- * the driver will do so if the appropriate conditions are met.
- *
- * See \ref nvmlEnableState_t for details on allowed modes.
- *
- * @param device The identifier of the target device
- * @param mode Reference in which to return the current power management mode
- *
- * @return
- * - \ref NVML_SUCCESS if \a mode has been set
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL
- * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementMode(nvmlDevice_t device, nvmlEnableState_t *mode);
-
-/**
- * Retrieves the power management limit associated with this device.
- *
- * For Fermi &tm; or newer fully supported devices.
- *
- * The power limit defines the upper boundary for the card's power draw. If
- * the card's total power draw reaches this limit the power management algorithm kicks in.
- *
- * This reading is only available if power management mode is supported.
- * See \ref nvmlDeviceGetPowerManagementMode.
- *
- * @param device The identifier of the target device
- * @param limit Reference in which to return the power management limit in milliwatts
- *
- * @return
- * - \ref NVML_SUCCESS if \a limit has been set
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a limit is NULL
- * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementLimit(nvmlDevice_t device, unsigned int *limit);
-
-/**
- * Retrieves information about possible values of power management limits on this device.
- *
- * For Kepler &tm; or newer fully supported devices.
- * - * @param device The identifier of the target device - * @param minLimit Reference in which to return the minimum power management limit in milliwatts - * @param maxLimit Reference in which to return the maximum power management limit in milliwatts - * - * @return - * - \ref NVML_SUCCESS if \a minLimit and \a maxLimit have been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a minLimit or \a maxLimit is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceSetPowerManagementLimit - */ -nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementLimitConstraints(nvmlDevice_t device, unsigned int *minLimit, unsigned int *maxLimit); - -/** - * Retrieves default power management limit on this device, in milliwatts. - * Default power management limit is a power management limit that the device boots with. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param defaultLimit Reference in which to return the default power management limit in milliwatts - * - * @return - * - \ref NVML_SUCCESS if \a defaultLimit has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a defaultLimit is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetPowerManagementDefaultLimit(nvmlDevice_t device, unsigned int *defaultLimit); - -/** - * Retrieves power usage for this GPU in milliwatts and its associated circuitry (e.g. memory) - * - * For Fermi &tm; or newer fully supported devices. - * - * On Fermi and Kepler GPUs the reading is accurate to within +/- 5% of current power draw. - * - * It is only available if power management mode is supported. See \ref nvmlDeviceGetPowerManagementMode. - * - * @param device The identifier of the target device - * @param power Reference in which to return the power usage information - * - * @return - * - \ref NVML_SUCCESS if \a power has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a power is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support power readings - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetPowerUsage(nvmlDevice_t device, unsigned int *power); - -/** - * Retrieves total energy consumption for this GPU in millijoules (mJ) since the driver was last reloaded - * - * For newer than Pascal &tm; fully supported devices. 
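
Illustrative usage (not part of the header): a sketch that reports current power draw against the configured management limit and the settable range, all returned by NVML in milliwatts as documented above. `dev` is assumed to be an initialized device handle.

/* Report current power draw, the active management limit and the settable range. */
#include <stdio.h>
#include <nvml.h>

void print_power(nvmlDevice_t dev)
{
    unsigned int usage = 0, limit = 0, minLimit = 0, maxLimit = 0;

    /* Both readings are in milliwatts; convert to watts for display. */
    if (nvmlDeviceGetPowerUsage(dev, &usage) == NVML_SUCCESS &&
        nvmlDeviceGetPowerManagementLimit(dev, &limit) == NVML_SUCCESS)
        printf("power: %.1f W of %.1f W limit\n", usage / 1000.0, limit / 1000.0);

    if (nvmlDeviceGetPowerManagementLimitConstraints(dev, &minLimit, &maxLimit) == NVML_SUCCESS)
        printf("settable limit range: %.1f W .. %.1f W\n",
               minLimit / 1000.0, maxLimit / 1000.0);
}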
- * - * @param device The identifier of the target device - * @param energy Reference in which to return the energy consumption information - * - * @return - * - \ref NVML_SUCCESS if \a energy has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a energy is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support energy readings - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetTotalEnergyConsumption(nvmlDevice_t device, unsigned long long *energy); - -/** - * Get the effective power limit that the driver enforces after taking into account all limiters - * - * Note: This can be different from the \ref nvmlDeviceGetPowerManagementLimit if other limits are set elsewhere - * This includes the out of band power limit interface - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device The device to communicate with - * @param limit Reference in which to return the power management limit in milliwatts - * - * @return - * - \ref NVML_SUCCESS if \a limit has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a limit is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetEnforcedPowerLimit(nvmlDevice_t device, unsigned int *limit); - -/** - * Retrieves the current GOM and pending GOM (the one that GPU will switch to after reboot). - * - * For GK110 M-class and X-class Tesla &tm; products from the Kepler family. - * Modes \ref NVML_GOM_LOW_DP and \ref NVML_GOM_ALL_ON are supported on fully supported GeForce products. - * Not supported on Quadro ® and Tesla &tm; C-class products. - * - * @param device The identifier of the target device - * @param current Reference in which to return the current GOM - * @param pending Reference in which to return the pending GOM - * - * @return - * - \ref NVML_SUCCESS if \a mode has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a current or \a pending is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlGpuOperationMode_t - * @see nvmlDeviceSetGpuOperationMode - */ -nvmlReturn_t DECLDIR nvmlDeviceGetGpuOperationMode(nvmlDevice_t device, nvmlGpuOperationMode_t *current, nvmlGpuOperationMode_t *pending); - -/** - * Retrieves the amount of used, free and total memory available on the device, in bytes. - * - * For all products. - * - * Enabling ECC reduces the amount of total available memory, due to the extra required parity bits. - * Under WDDM most device memory is allocated and managed on startup by Windows. - * - * Under Linux and Windows TCC, the reported amount of used memory is equal to the sum of memory allocated - * by all active channels on the device. 
- * - * See \ref nvmlMemory_t for details on available memory info. - * - * @param device The identifier of the target device - * @param memory Reference in which to return the memory information - * - * @return - * - \ref NVML_SUCCESS if \a memory has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memory is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetMemoryInfo(nvmlDevice_t device, nvmlMemory_t *memory); - -/** - * Retrieves the current compute mode for the device. - * - * For all products. - * - * See \ref nvmlComputeMode_t for details on allowed compute modes. - * - * @param device The identifier of the target device - * @param mode Reference in which to return the current compute mode - * - * @return - * - \ref NVML_SUCCESS if \a mode has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceSetComputeMode() - */ -nvmlReturn_t DECLDIR nvmlDeviceGetComputeMode(nvmlDevice_t device, nvmlComputeMode_t *mode); - -/** - * Retrieves the CUDA compute capability of the device. - * - * For all products. - * - * Returns the major and minor compute capability version numbers of the - * device. The major and minor versions are equivalent to the - * CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR and - * CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR attributes that would be - * returned by CUDA's cuDeviceGetAttribute(). - * - * @param device The identifier of the target device - * @param major Reference in which to return the major CUDA compute capability - * @param minor Reference in which to return the minor CUDA compute capability - * - * @return - * - \ref NVML_SUCCESS if \a major and \a minor have been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a major or \a minor are NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetCudaComputeCapability(nvmlDevice_t device, int *major, int *minor); - -/** - * Retrieves the current and pending ECC modes for the device. - * - * For Fermi &tm; or newer fully supported devices. - * Only applicable to devices with ECC. - * Requires \a NVML_INFOROM_ECC version 1.0 or higher. - * - * Changing ECC modes requires a reboot. The "pending" ECC mode refers to the target mode following - * the next reboot. - * - * See \ref nvmlEnableState_t for details on allowed modes. 
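
Illustrative usage (not part of the header): a helper that prints memory occupancy (nvmlMemory_t reports bytes) and the CUDA compute capability queried above. `dev` is assumed to be an initialized device handle.

/* Print used/total device memory and the CUDA compute capability. */
#include <stdio.h>
#include <nvml.h>

void print_memory_and_cc(nvmlDevice_t dev)
{
    nvmlMemory_t mem;
    int major = 0, minor = 0;

    if (nvmlDeviceGetMemoryInfo(dev, &mem) == NVML_SUCCESS)
        printf("memory: %llu MiB used / %llu MiB total\n",
               mem.used >> 20, mem.total >> 20);

    if (nvmlDeviceGetCudaComputeCapability(dev, &major, &minor) == NVML_SUCCESS)
        printf("compute capability: %d.%d\n", major, minor);
}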
- * - * @param device The identifier of the target device - * @param current Reference in which to return the current ECC mode - * @param pending Reference in which to return the pending ECC mode - * - * @return - * - \ref NVML_SUCCESS if \a current and \a pending have been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or either \a current or \a pending is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceSetEccMode() - */ -nvmlReturn_t DECLDIR nvmlDeviceGetEccMode(nvmlDevice_t device, nvmlEnableState_t *current, nvmlEnableState_t *pending); - -/** - * Retrieves the device boardId from 0-N. - * Devices with the same boardId indicate GPUs connected to the same PLX. Use in conjunction with - * \ref nvmlDeviceGetMultiGpuBoard() to decide if they are on the same board as well. - * The boardId returned is a unique ID for the current configuration. Uniqueness and ordering across - * reboots and system configurations is not guaranteed (i.e. if a Tesla K40c returns 0x100 and - * the two GPUs on a Tesla K10 in the same system returns 0x200 it is not guaranteed they will - * always return those values but they will always be different from each other). - * - * - * For Fermi &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param boardId Reference in which to return the device's board ID - * - * @return - * - \ref NVML_SUCCESS if \a boardId has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a boardId is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetBoardId(nvmlDevice_t device, unsigned int *boardId); - -/** - * Retrieves whether the device is on a Multi-GPU Board - * Devices that are on multi-GPU boards will set \a multiGpuBool to a non-zero value. - * - * For Fermi &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param multiGpuBool Reference in which to return a zero or non-zero value - * to indicate whether the device is on a multi GPU board - * - * @return - * - \ref NVML_SUCCESS if \a multiGpuBool has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a multiGpuBool is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetMultiGpuBoard(nvmlDevice_t device, unsigned int *multiGpuBool); - -/** - * Retrieves the total ECC error counts for the device. - * - * For Fermi &tm; or newer fully supported devices. - * Only applicable to devices with ECC. - * Requires \a NVML_INFOROM_ECC version 1.0 or higher. - * Requires ECC Mode to be enabled. 
- * - * The total error count is the sum of errors across each of the separate memory systems, i.e. the total set of - * errors across the entire device. - * - * See \ref nvmlMemoryErrorType_t for a description of available error types.\n - * See \ref nvmlEccCounterType_t for a description of available counter types. - * - * @param device The identifier of the target device - * @param errorType Flag that specifies the type of the errors. - * @param counterType Flag that specifies the counter-type of the errors. - * @param eccCounts Reference in which to return the specified ECC errors - * - * @return - * - \ref NVML_SUCCESS if \a eccCounts has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType or \a counterType is invalid, or \a eccCounts is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceClearEccErrorCounts() - */ -nvmlReturn_t DECLDIR nvmlDeviceGetTotalEccErrors(nvmlDevice_t device, nvmlMemoryErrorType_t errorType, nvmlEccCounterType_t counterType, unsigned long long *eccCounts); - -/** - * Retrieves the detailed ECC error counts for the device. - * - * @deprecated This API supports only a fixed set of ECC error locations - * On different GPU architectures different locations are supported - * See \ref nvmlDeviceGetMemoryErrorCounter - * - * For Fermi &tm; or newer fully supported devices. - * Only applicable to devices with ECC. - * Requires \a NVML_INFOROM_ECC version 2.0 or higher to report aggregate location-based ECC counts. - * Requires \a NVML_INFOROM_ECC version 1.0 or higher to report all other ECC counts. - * Requires ECC Mode to be enabled. - * - * Detailed errors provide separate ECC counts for specific parts of the memory system. - * - * Reports zero for unsupported ECC error counters when a subset of ECC error counters are supported. - * - * See \ref nvmlMemoryErrorType_t for a description of available bit types.\n - * See \ref nvmlEccCounterType_t for a description of available counter types.\n - * See \ref nvmlEccErrorCounts_t for a description of provided detailed ECC counts. - * - * @param device The identifier of the target device - * @param errorType Flag that specifies the type of the errors. - * @param counterType Flag that specifies the counter-type of the errors. - * @param eccCounts Reference in which to return the specified ECC errors - * - * @return - * - \ref NVML_SUCCESS if \a eccCounts has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType or \a counterType is invalid, or \a eccCounts is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceClearEccErrorCounts() - */ -nvmlReturn_t DECLDIR nvmlDeviceGetDetailedEccErrors(nvmlDevice_t device, nvmlMemoryErrorType_t errorType, nvmlEccCounterType_t counterType, nvmlEccErrorCounts_t *eccCounts); - -/** - * Retrieves the requested memory error counter for the device. - * - * For Fermi &tm; or newer fully supported devices. 
- * Requires \a NVML_INFOROM_ECC version 2.0 or higher to report aggregate location-based memory error counts.
- * Requires \a NVML_INFOROM_ECC version 1.0 or higher to report all other memory error counts.
- *
- * Only applicable to devices with ECC.
- *
- * Requires ECC Mode to be enabled.
- *
- * See \ref nvmlMemoryErrorType_t for a description of available memory error types.\n
- * See \ref nvmlEccCounterType_t for a description of available counter types.\n
- * See \ref nvmlMemoryLocation_t for a description of available counter locations.\n
- *
- * @param device The identifier of the target device
- * @param errorType Flag that specifies the type of error.
- * @param counterType Flag that specifies the counter-type of the errors.
- * @param locationType Specifies the location of the counter.
- * @param count Reference in which to return the ECC counter
- *
- * @return
- * - \ref NVML_SUCCESS if \a count has been populated
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a errorType, \a counterType or \a locationType is
- * invalid, or \a count is NULL
- * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support ECC error reporting in the specified memory
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetMemoryErrorCounter(nvmlDevice_t device, nvmlMemoryErrorType_t errorType,
- nvmlEccCounterType_t counterType,
- nvmlMemoryLocation_t locationType, unsigned long long *count);
-
-/**
- * Retrieves the current utilization rates for the device's major subsystems.
- *
- * For Fermi &tm; or newer fully supported devices.
- *
- * See \ref nvmlUtilization_t for details on available utilization rates.
- *
- * \note During driver initialization when ECC is enabled one can see high GPU and Memory Utilization readings.
- * This is caused by the ECC Memory Scrubbing mechanism that is performed during driver initialization.
- *
- * @param device The identifier of the target device
- * @param utilization Reference in which to return the utilization information
- *
- * @return
- * - \ref NVML_SUCCESS if \a utilization has been populated
- * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized
- * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a utilization is NULL
- * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature
- * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible
- * - \ref NVML_ERROR_UNKNOWN on any unexpected error
- */
-nvmlReturn_t DECLDIR nvmlDeviceGetUtilizationRates(nvmlDevice_t device, nvmlUtilization_t *utilization);
-
-/**
- * Retrieves the current utilization and sampling size in microseconds for the Encoder
- *
- * For Kepler &tm; or newer fully supported devices.
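
Illustrative usage (not part of the header): a one-call sketch of the coarse utilization query described above; nvmlUtilization_t reports GPU and memory utilization as percentages over NVML's internal sampling window. `dev` is assumed to be an initialized device handle.

/* Sample the coarse GPU and memory utilization counters. */
#include <stdio.h>
#include <nvml.h>

void print_utilization(nvmlDevice_t dev)
{
    nvmlUtilization_t util;
    if (nvmlDeviceGetUtilizationRates(dev, &util) == NVML_SUCCESS)
        printf("utilization: gpu %u%%, memory %u%%\n", util.gpu, util.memory);
}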
- * - * @param device The identifier of the target device - * @param utilization Reference to an unsigned int for encoder utilization info - * @param samplingPeriodUs Reference to an unsigned int for the sampling period in US - * - * @return - * - \ref NVML_SUCCESS if \a utilization has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetEncoderUtilization(nvmlDevice_t device, unsigned int *utilization, unsigned int *samplingPeriodUs); - -/** - * Retrieves the current capacity of the device's encoder, as a percentage of maximum encoder capacity with valid values in the range 0-100. - * - * For Maxwell &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param encoderQueryType Type of encoder to query - * @param encoderCapacity Reference to an unsigned int for the encoder capacity - * - * @return - * - \ref NVML_SUCCESS if \a encoderCapacity is fetched - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a encoderCapacity is NULL, or \a device or \a encoderQueryType - * are invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if device does not support the encoder specified in \a encodeQueryType - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetEncoderCapacity (nvmlDevice_t device, nvmlEncoderType_t encoderQueryType, unsigned int *encoderCapacity); - -/** - * Retrieves the current encoder statistics for a given device. - * - * For Maxwell &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param sessionCount Reference to an unsigned int for count of active encoder sessions - * @param averageFps Reference to an unsigned int for trailing average FPS of all active sessions - * @param averageLatency Reference to an unsigned int for encode latency in microseconds - * - * @return - * - \ref NVML_SUCCESS if \a sessionCount, \a averageFps and \a averageLatency is fetched - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount, or \a device or \a averageFps, - * or \a averageLatency is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetEncoderStats (nvmlDevice_t device, unsigned int *sessionCount, - unsigned int *averageFps, unsigned int *averageLatency); - -/** - * Retrieves information about active encoder sessions on a target device. - * - * An array of active encoder sessions is returned in the caller-supplied buffer pointed at by \a sessionInfos. The - * array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions - * written to the buffer. 
- * - * If the supplied buffer is not large enough to accommodate the active session array, the function returns - * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlEncoderSessionInfo_t array required in \a sessionCount. - * To query the number of active encoder sessions, call this function with *sessionCount = 0. The code will return - * NVML_SUCCESS with number of active encoder sessions updated in *sessionCount. - * - * For Maxwell &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param sessionCount Reference to caller supplied array size, and returns the number of sessions. - * @param sessionInfos Reference in which to return the session information - * - * @return - * - \ref NVML_SUCCESS if \a sessionInfos is fetched - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is returned in \a sessionCount - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount is NULL. - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetEncoderSessions(nvmlDevice_t device, unsigned int *sessionCount, nvmlEncoderSessionInfo_t *sessionInfos); - -/** - * Retrieves the current utilization and sampling size in microseconds for the Decoder - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param utilization Reference to an unsigned int for decoder utilization info - * @param samplingPeriodUs Reference to an unsigned int for the sampling period in US - * - * @return - * - \ref NVML_SUCCESS if \a utilization has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetDecoderUtilization(nvmlDevice_t device, unsigned int *utilization, unsigned int *samplingPeriodUs); - -/** -* Retrieves the active frame buffer capture sessions statistics for a given device. -* -* For Maxwell &tm; or newer fully supported devices. -* -* @param device The identifier of the target device -* @param fbcStats Reference to nvmlFBCStats_t structure containing NvFBC stats -* -* @return -* - \ref NVML_SUCCESS if \a fbcStats is fetched -* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized -* - \ref NVML_ERROR_INVALID_ARGUMENT if \a fbcStats is NULL -* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible -* - \ref NVML_ERROR_UNKNOWN on any unexpected error -*/ -nvmlReturn_t DECLDIR nvmlDeviceGetFBCStats(nvmlDevice_t device, nvmlFBCStats_t *fbcStats); - -/** -* Retrieves information about active frame buffer capture sessions on a target device. -* -* An array of active encoder sessions is returned in the caller-supplied buffer pointed at by \a sessionInfo. The -* array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions -* written to the buffer. 
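
Illustrative usage (not part of the header): a sketch of the *sessionCount = 0 probe described above for nvmlDeviceGetEncoderSessions, followed by a second call into an allocated array. Reading the pid field of nvmlEncoderSessionInfo_t is an assumption about that struct's layout; `dev` is assumed to be an initialized device handle.

/* List active encoder sessions using the two-call sizing idiom. */
#include <stdio.h>
#include <stdlib.h>
#include <nvml.h>

void print_encoder_sessions(nvmlDevice_t dev)
{
    unsigned int count = 0;

    /* Probe with count = 0 to learn how many sessions are active. */
    nvmlReturn_t r = nvmlDeviceGetEncoderSessions(dev, &count, NULL);
    if (r != NVML_SUCCESS && r != NVML_ERROR_INSUFFICIENT_SIZE)
        return;
    if (count == 0) {
        printf("no active encoder sessions\n");
        return;
    }

    nvmlEncoderSessionInfo_t *infos = calloc(count, sizeof *infos);
    if (!infos)
        return;

    if (nvmlDeviceGetEncoderSessions(dev, &count, infos) == NVML_SUCCESS)
        for (unsigned int i = 0; i < count; ++i)
            printf("encoder session owned by pid %u\n", infos[i].pid);
    free(infos);
}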
-* -* If the supplied buffer is not large enough to accommodate the active session array, the function returns -* NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlFBCSessionInfo_t array required in \a sessionCount. -* To query the number of active FBC sessions, call this function with *sessionCount = 0. The code will return -* NVML_SUCCESS with number of active FBC sessions updated in *sessionCount. -* -* For Maxwell &tm; or newer fully supported devices. -* -* @note hResolution, vResolution, averageFPS and averageLatency data for a FBC session returned in \a sessionInfo may -* be zero if there are no new frames captured since the session started. -* -* @param device The identifier of the target device -* @param sessionCount Reference to caller supplied array size, and returns the number of sessions. -* @param sessionInfo Reference in which to return the session information -* -* @return -* - \ref NVML_SUCCESS if \a sessionInfo is fetched -* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized -* - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is returned in \a sessionCount -* - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount is NULL. -* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible -* - \ref NVML_ERROR_UNKNOWN on any unexpected error -*/ -nvmlReturn_t DECLDIR nvmlDeviceGetFBCSessions(nvmlDevice_t device, unsigned int *sessionCount, nvmlFBCSessionInfo_t *sessionInfo); - -/** - * Retrieves the current and pending driver model for the device. - * - * For Fermi &tm; or newer fully supported devices. - * For windows only. - * - * On Windows platforms the device driver can run in either WDDM or WDM (TCC) mode. If a display is attached - * to the device it must run in WDDM mode. TCC mode is preferred if a display is not attached. - * - * See \ref nvmlDriverModel_t for details on available driver models. - * - * @param device The identifier of the target device - * @param current Reference in which to return the current driver model - * @param pending Reference in which to return the pending driver model - * - * @return - * - \ref NVML_SUCCESS if either \a current and/or \a pending have been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or both \a current and \a pending are NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the platform is not windows - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceSetDriverModel() - */ -nvmlReturn_t DECLDIR nvmlDeviceGetDriverModel(nvmlDevice_t device, nvmlDriverModel_t *current, nvmlDriverModel_t *pending); - -/** - * Get VBIOS version of the device. - * - * For all products. - * - * The VBIOS version may change from time to time. It will not exceed 32 characters in length - * (including the NULL terminator). See \ref nvmlConstants::NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE. 
- * - * @param device The identifier of the target device - * @param version Reference to which to return the VBIOS version - * @param length The maximum allowed length of the string returned in \a version - * - * @return - * - \ref NVML_SUCCESS if \a version has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a version is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetVbiosVersion(nvmlDevice_t device, char *version, unsigned int length); - -/** - * Get Bridge Chip Information for all the bridge chips on the board. - * - * For all fully supported products. - * Only applicable to multi-GPU products. - * - * @param device The identifier of the target device - * @param bridgeHierarchy Reference to the returned bridge chip Hierarchy - * - * @return - * - \ref NVML_SUCCESS if bridge chip exists - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a bridgeInfo is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if bridge chip not supported on the device - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - */ -nvmlReturn_t DECLDIR nvmlDeviceGetBridgeChipInfo(nvmlDevice_t device, nvmlBridgeChipHierarchy_t *bridgeHierarchy); - -/** - * Get information about processes with a compute context on a device - * - * For Fermi &tm; or newer fully supported devices. - * - * This function returns information only about compute running processes (e.g. CUDA application which have - * active context). Any graphics applications (e.g. using OpenGL, DirectX) won't be listed by this function. - * - * To query the current number of running compute processes, call this function with *infoCount = 0. The - * return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if none are running. For this call - * \a infos is allowed to be NULL. - * - * The usedGpuMemory field returned is all of the memory used by the application. - * - * Keep in mind that information returned by this call is dynamic and the number of elements might change in - * time. Allocate more space for \a infos table in case new compute processes are spawned. 
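
Illustrative usage (not part of the header): a helper that reads the VBIOS version string into a buffer sized with NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE, as the note above suggests. `dev` is assumed to be an initialized device handle.

/* Read and print the VBIOS version string. */
#include <stdio.h>
#include <nvml.h>

void print_vbios(nvmlDevice_t dev)
{
    char vbios[NVML_DEVICE_VBIOS_VERSION_BUFFER_SIZE];
    if (nvmlDeviceGetVbiosVersion(dev, vbios, (unsigned int)sizeof vbios) == NVML_SUCCESS)
        printf("VBIOS version: %s\n", vbios);
}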
- * - * @param device The identifier of the target device - * @param infoCount Reference in which to provide the \a infos array size, and - * to return the number of returned elements - * @param infos Reference in which to return the process information - * - * @return - * - \ref NVML_SUCCESS if \a infoCount and \a infos have been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a infoCount indicates that the \a infos array is too small - * \a infoCount will contain minimal amount of space necessary for - * the call to complete - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, either of \a infoCount or \a infos is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see \ref nvmlSystemGetProcessName - */ -nvmlReturn_t DECLDIR nvmlDeviceGetComputeRunningProcesses(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_t *infos); - -/** - * Get information about processes with a graphics context on a device - * - * For Kepler &tm; or newer fully supported devices. - * - * This function returns information only about graphics based processes - * (eg. applications using OpenGL, DirectX) - * - * To query the current number of running graphics processes, call this function with *infoCount = 0. The - * return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if none are running. For this call - * \a infos is allowed to be NULL. - * - * The usedGpuMemory field returned is all of the memory used by the application. - * - * Keep in mind that information returned by this call is dynamic and the number of elements might change in - * time. Allocate more space for \a infos table in case new graphics processes are spawned. - * - * @param device The identifier of the target device - * @param infoCount Reference in which to provide the \a infos array size, and - * to return the number of returned elements - * @param infos Reference in which to return the process information - * - * @return - * - \ref NVML_SUCCESS if \a infoCount and \a infos have been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a infoCount indicates that the \a infos array is too small - * \a infoCount will contain minimal amount of space necessary for - * the call to complete - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, either of \a infoCount or \a infos is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see \ref nvmlSystemGetProcessName - */ -nvmlReturn_t DECLDIR nvmlDeviceGetGraphicsRunningProcesses(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_t *infos); - -/** - * Check if the GPU devices are on the same physical board. - * - * For all fully supported products. - * - * @param device1 The first GPU device - * @param device2 The second GPU device - * @param onSameBoard Reference in which to return the status. - * Non-zero indicates that the GPUs are on the same board. 
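/**
 * A minimal sketch of the two-call size-query pattern for
 * nvmlDeviceGetComputeRunningProcesses() declared above: the first call with
 * *infoCount == 0 returns NVML_ERROR_INSUFFICIENT_SIZE and the required element
 * count, then the second call fills the allocated array. Assumes a valid
 * nvmlDevice_t handle obtained elsewhere.
 *
 * @code
 * #include <stdio.h>
 * #include <stdlib.h>
 * #include <nvml.h>
 *
 * void list_compute_procs(nvmlDevice_t dev)
 * {
 *     unsigned int count = 0;
 *     nvmlReturn_t st = nvmlDeviceGetComputeRunningProcesses(dev, &count, NULL);
 *     if (st == NVML_SUCCESS) return;                      // nothing running
 *     if (st != NVML_ERROR_INSUFFICIENT_SIZE) return;      // real error
 *
 *     count += 8;                                          // headroom for newly spawned processes
 *     nvmlProcessInfo_t *infos = malloc(count * sizeof(*infos));
 *     if (!infos) return;
 *     if (nvmlDeviceGetComputeRunningProcesses(dev, &count, infos) == NVML_SUCCESS) {
 *         for (unsigned int i = 0; i < count; ++i)
 *             printf("pid %u uses %llu bytes\n", infos[i].pid, infos[i].usedGpuMemory);
 *     }
 *     free(infos);
 * }
 * @endcode
 */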
- * - * @return - * - \ref NVML_SUCCESS if \a onSameBoard has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a dev1 or \a dev2 are invalid or \a onSameBoard is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if this check is not supported by the device - * - \ref NVML_ERROR_GPU_IS_LOST if the either GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceOnSameBoard(nvmlDevice_t device1, nvmlDevice_t device2, int *onSameBoard); - -/** - * Retrieves the root/admin permissions on the target API. See \a nvmlRestrictedAPI_t for the list of supported APIs. - * If an API is restricted only root users can call that API. See \a nvmlDeviceSetAPIRestriction to change current permissions. - * - * For all fully supported products. - * - * @param device The identifier of the target device - * @param apiType Target API type for this operation - * @param isRestricted Reference in which to return the current restriction - * NVML_FEATURE_ENABLED indicates that the API is root-only - * NVML_FEATURE_DISABLED indicates that the API is accessible to all users - * - * @return - * - \ref NVML_SUCCESS if \a isRestricted has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a apiType incorrect or \a isRestricted is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device or the device does not support - * the feature that is being queried (E.G. Enabling/disabling Auto Boosted clocks is - * not supported by the device) - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlRestrictedAPI_t - */ -nvmlReturn_t DECLDIR nvmlDeviceGetAPIRestriction(nvmlDevice_t device, nvmlRestrictedAPI_t apiType, nvmlEnableState_t *isRestricted); - -/** - * Gets recent samples for the GPU. - * - * For Kepler &tm; or newer fully supported devices. - * - * Based on type, this method can be used to fetch the power, utilization or clock samples maintained in the buffer by - * the driver. - * - * Power, Utilization and Clock samples are returned as type "unsigned int" for the union nvmlValue_t. - * - * To get the size of samples that user needs to allocate, the method is invoked with samples set to NULL. - * The returned samplesCount will provide the number of samples that can be queried. The user needs to - * allocate the buffer with size as samplesCount * sizeof(nvmlSample_t). - * - * lastSeenTimeStamp represents CPU timestamp in microseconds. Set it to 0 to fetch all the samples maintained by the - * underlying buffer. Set lastSeenTimeStamp to one of the timeStamps retrieved from the date of the previous query - * to get more recent samples. - * - * This method fetches the number of entries which can be accommodated in the provided samples array, and the - * reference samplesCount is updated to indicate how many samples were actually retrieved. The advantage of using this - * method for samples in contrast to polling via existing methods is to get get higher frequency data at lower polling cost. - * - * @param device The identifier for the target device - * @param type Type of sampling event - * @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp. 
- * @param sampleValType Output parameter to represent the type of sample value as described in nvmlSampleVal_t - * @param sampleCount Reference to provide the number of elements which can be queried in samples array - * @param samples Reference in which samples are returned - - * @return - * - \ref NVML_SUCCESS if samples are successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a samplesCount is NULL or - * reference to \a sampleCount is 0 for non null \a samples - * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_NOT_FOUND if sample entries are not found - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetSamples(nvmlDevice_t device, nvmlSamplingType_t type, unsigned long long lastSeenTimeStamp, - nvmlValueType_t *sampleValType, unsigned int *sampleCount, nvmlSample_t *samples); - -/** - * Gets Total, Available and Used size of BAR1 memory. - * - * BAR1 is used to map the FB (device memory) so that it can be directly accessed by the CPU or by 3rd party - * devices (peer-to-peer on the PCIE bus). - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param bar1Memory Reference in which BAR1 memory - * information is returned. - * - * @return - * - \ref NVML_SUCCESS if BAR1 memory is successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a bar1Memory is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - */ -nvmlReturn_t DECLDIR nvmlDeviceGetBAR1MemoryInfo(nvmlDevice_t device, nvmlBAR1Memory_t *bar1Memory); - - -/** - * Gets the duration of time during which the device was throttled (lower than requested clocks) due to power - * or thermal constraints. - * - * The method is important to users who are tying to understand if their GPUs throttle at any point during their applications. The - * difference in violation times at two different reference times gives the indication of GPU throttling event. - * - * Violation for thermal capping is not supported at this time. - * - * For Kepler &tm; or newer fully supported devices. 
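/**
 * A minimal sketch of the size-probe pattern for nvmlDeviceGetSamples() declared
 * above, here reading GPU utilization samples. NVML_GPU_UTILIZATION_SAMPLES and
 * the nvmlSample_t / nvmlValue_t member names used below are assumed to match the
 * definitions elsewhere in this header.
 *
 * @code
 * #include <stdio.h>
 * #include <stdlib.h>
 * #include <nvml.h>
 *
 * void dump_util_samples(nvmlDevice_t dev, unsigned long long lastSeen)
 * {
 *     nvmlValueType_t valType;
 *     unsigned int count = 0;
 *     // First call with samples == NULL only reports how many samples are buffered.
 *     if (nvmlDeviceGetSamples(dev, NVML_GPU_UTILIZATION_SAMPLES, lastSeen,
 *                              &valType, &count, NULL) != NVML_SUCCESS || count == 0)
 *         return;
 *     nvmlSample_t *samples = malloc(count * sizeof(*samples));
 *     if (!samples) return;
 *     if (nvmlDeviceGetSamples(dev, NVML_GPU_UTILIZATION_SAMPLES, lastSeen,
 *                              &valType, &count, samples) == NVML_SUCCESS) {
 *         for (unsigned int i = 0; i < count; ++i)
 *             printf("%llu us: %u%%\n", samples[i].timeStamp, samples[i].sampleValue.uiVal);
 *     }
 *     free(samples);
 * }
 * @endcode
 */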
- * - * @param device The identifier of the target device - * @param perfPolicyType Represents Performance policy which can trigger GPU throttling - * @param violTime Reference to which violation time related information is returned - * - * - * @return - * - \ref NVML_SUCCESS if violation time is successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a perfPolicyType is invalid, or \a violTime is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - */ -nvmlReturn_t DECLDIR nvmlDeviceGetViolationStatus(nvmlDevice_t device, nvmlPerfPolicyType_t perfPolicyType, nvmlViolationTime_t *violTime); - -/** - * @} - */ - -/** @addtogroup nvmlAccountingStats - * @{ - */ - -/** - * Queries the state of per process accounting mode. - * - * For Kepler &tm; or newer fully supported devices. - * - * See \ref nvmlDeviceGetAccountingStats for more details. - * See \ref nvmlDeviceSetAccountingMode - * - * @param device The identifier of the target device - * @param mode Reference in which to return the current accounting mode - * - * @return - * - \ref NVML_SUCCESS if the mode has been successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode are NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetAccountingMode(nvmlDevice_t device, nvmlEnableState_t *mode); - -/** - * Queries process's accounting stats. - * - * For Kepler &tm; or newer fully supported devices. - * - * Accounting stats capture GPU utilization and other statistics across the lifetime of a process. - * Accounting stats can be queried during life time of the process and after its termination. - * The time field in \ref nvmlAccountingStats_t is reported as 0 during the lifetime of the process and - * updated to actual running time after its termination. - * Accounting stats are kept in a circular buffer, newly created processes overwrite information about old - * processes. - * - * See \ref nvmlAccountingStats_t for description of each returned metric. - * List of processes that can be queried can be retrieved from \ref nvmlDeviceGetAccountingPids. - * - * @note Accounting Mode needs to be on. See \ref nvmlDeviceGetAccountingMode. - * @note Only compute and graphics applications stats can be queried. Monitoring applications stats can't be - * queried since they don't contribute to GPU utilization. - * @note In case of pid collision stats of only the latest process (that terminated last) will be reported - * - * @warning On Kepler devices per process statistics are accurate only if there's one process running on a GPU. 
- * - * @param device The identifier of the target device - * @param pid Process Id of the target process to query stats for - * @param stats Reference in which to return the process's accounting stats - * - * @return - * - \ref NVML_SUCCESS if stats have been successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a stats are NULL - * - \ref NVML_ERROR_NOT_FOUND if process stats were not found - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature or accounting mode is disabled - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetAccountingBufferSize - */ -nvmlReturn_t DECLDIR nvmlDeviceGetAccountingStats(nvmlDevice_t device, unsigned int pid, nvmlAccountingStats_t *stats); - -/** - * Queries list of processes that can be queried for accounting stats. The list of processes returned - * can be in running or terminated state. - * - * For Kepler &tm; or newer fully supported devices. - * - * To just query the number of processes ready to be queried, call this function with *count = 0 and - * pids=NULL. The return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if list is empty. - * - * For more details see \ref nvmlDeviceGetAccountingStats. - * - * @note In case of PID collision some processes might not be accessible before the circular buffer is full. - * - * @param device The identifier of the target device - * @param count Reference in which to provide the \a pids array size, and - * to return the number of elements ready to be queried - * @param pids Reference in which to return list of process ids - * - * @return - * - \ref NVML_SUCCESS if pids were successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a count is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature or accounting mode is disabled - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small (\a count is set to - * expected value) - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetAccountingBufferSize - */ -nvmlReturn_t DECLDIR nvmlDeviceGetAccountingPids(nvmlDevice_t device, unsigned int *count, unsigned int *pids); - -/** - * Returns the number of processes that the circular buffer with accounting pids can hold. - * - * For Kepler &tm; or newer fully supported devices. - * - * This is the maximum number of processes that accounting information will be stored for before information - * about oldest processes will get overwritten by information about new processes. - * - * @param device The identifier of the target device - * @param bufferSize Reference in which to provide the size (in number of elements) - * of the circular buffer for accounting stats. 
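/**
 * A minimal sketch combining nvmlDeviceGetAccountingPids() and
 * nvmlDeviceGetAccountingStats() declared above. Accounting mode must already be
 * enabled; the nvmlAccountingStats_t field names used below (gpuUtilization,
 * maxMemoryUsage) are assumptions based on the structure defined elsewhere in
 * this header.
 *
 * @code
 * #include <stdio.h>
 * #include <stdlib.h>
 * #include <nvml.h>
 *
 * void dump_accounting(nvmlDevice_t dev)
 * {
 *     unsigned int count = 0;
 *     if (nvmlDeviceGetAccountingPids(dev, &count, NULL) != NVML_ERROR_INSUFFICIENT_SIZE)
 *         return;                                          // empty list or error
 *     unsigned int *pids = malloc(count * sizeof(*pids));
 *     if (!pids) return;
 *     if (nvmlDeviceGetAccountingPids(dev, &count, pids) == NVML_SUCCESS) {
 *         for (unsigned int i = 0; i < count; ++i) {
 *             nvmlAccountingStats_t stats;
 *             if (nvmlDeviceGetAccountingStats(dev, pids[i], &stats) == NVML_SUCCESS)
 *                 printf("pid %u: %u%% gpu, peak %llu bytes\n",
 *                        pids[i], stats.gpuUtilization, stats.maxMemoryUsage);
 *         }
 *     }
 *     free(pids);
 * }
 * @endcode
 */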
- * - * @return - * - \ref NVML_SUCCESS if buffer size was successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a bufferSize is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature or accounting mode is disabled - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetAccountingStats - * @see nvmlDeviceGetAccountingPids - */ -nvmlReturn_t DECLDIR nvmlDeviceGetAccountingBufferSize(nvmlDevice_t device, unsigned int *bufferSize); - -/** @} */ - -/** @addtogroup nvmlDeviceQueries - * @{ - */ - -/** - * Returns the list of retired pages by source, including pages that are pending retirement - * The address information provided from this API is the hardware address of the page that was retired. Note - * that this does not match the virtual address used in CUDA, but will match the address information in XID 63 - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param cause Filter page addresses by cause of retirement - * @param pageCount Reference in which to provide the \a addresses buffer size, and - * to return the number of retired pages that match \a cause - * Set to 0 to query the size without allocating an \a addresses buffer - * @param addresses Buffer to write the page addresses into - * - * @return - * - \ref NVML_SUCCESS if \a pageCount was populated and \a addresses was filled - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a pageCount indicates the buffer is not large enough to store all the - * matching page addresses. \a pageCount is set to the needed size. - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a pageCount is NULL, \a cause is invalid, or - * \a addresses is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetRetiredPages(nvmlDevice_t device, nvmlPageRetirementCause_t cause, - unsigned int *pageCount, unsigned long long *addresses); - -/** - * Returns the list of retired pages by source, including pages that are pending retirement - * The address information provided from this API is the hardware address of the page that was retired. Note - * that this does not match the virtual address used in CUDA, but will match the address information in XID 63 - * - * \note nvmlDeviceGetRetiredPages_v2 adds an additional timestamps parameter to return the time of each page's - * retirement. - * - * For Kepler &tm; or newer fully supported devices. 
- * - * @param device The identifier of the target device - * @param cause Filter page addresses by cause of retirement - * @param pageCount Reference in which to provide the \a addresses buffer size, and - * to return the number of retired pages that match \a cause - * Set to 0 to query the size without allocating an \a addresses buffer - * @param addresses Buffer to write the page addresses into - * @param timestamps Buffer to write the timestamps of page retirement, additional for _v2 - * - * @return - * - \ref NVML_SUCCESS if \a pageCount was populated and \a addresses was filled - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a pageCount indicates the buffer is not large enough to store all the - * matching page addresses. \a pageCount is set to the needed size. - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a pageCount is NULL, \a cause is invalid, or - * \a addresses is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetRetiredPages_v2(nvmlDevice_t device, nvmlPageRetirementCause_t cause, - unsigned int *pageCount, unsigned long long *addresses, unsigned long long *timestamps); - -/** - * Check if any pages are pending retirement and need a reboot to fully retire. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param isPending Reference in which to return the pending status - * - * @return - * - \ref NVML_SUCCESS if \a isPending was populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a isPending is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetRetiredPagesPendingStatus(nvmlDevice_t device, nvmlEnableState_t *isPending); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlUnitCommands Unit Commands - * This chapter describes NVML operations that change the state of the unit. For S-class products. - * Each of these requires root/admin access. Non-admin users will see an NVML_ERROR_NO_PERMISSION - * error code when invoking any of these methods. - * @{ - */ -/***************************************************************************************************/ - -/** - * Set the LED state for the unit. The LED can be either green (0) or amber (1). - * - * For S-class products. - * Requires root/admin permissions. - * - * This operation takes effect immediately. - * - * - * Current S-Class products don't provide unique LEDs for each unit. As such, both front - * and back LEDs will be toggled in unison regardless of which unit is specified with this command. - * - * See \ref nvmlLedColor_t for available colors. 
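/**
 * A minimal sketch for the retired-page queries declared above: probe the count
 * with a NULL buffer as described, fetch the addresses, then check whether a
 * reboot is still pending. NVML_PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR is
 * assumed to be one of the nvmlPageRetirementCause_t values defined elsewhere in
 * this header.
 *
 * @code
 * #include <stdio.h>
 * #include <stdlib.h>
 * #include <nvml.h>
 *
 * void report_retired_pages(nvmlDevice_t dev)
 * {
 *     unsigned int count = 0;
 *     nvmlReturn_t st = nvmlDeviceGetRetiredPages(
 *         dev, NVML_PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR, &count, NULL);
 *     if (st == NVML_ERROR_INSUFFICIENT_SIZE && count > 0) {
 *         unsigned long long *addrs = malloc(count * sizeof(*addrs));
 *         if (addrs && nvmlDeviceGetRetiredPages(
 *                 dev, NVML_PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR,
 *                 &count, addrs) == NVML_SUCCESS) {
 *             for (unsigned int i = 0; i < count; ++i)
 *                 printf("retired page at 0x%llx\n", addrs[i]);
 *         }
 *         free(addrs);
 *     }
 *     nvmlEnableState_t pending;
 *     if (nvmlDeviceGetRetiredPagesPendingStatus(dev, &pending) == NVML_SUCCESS &&
 *         pending == NVML_FEATURE_ENABLED)
 *         printf("reboot required to finish page retirement\n");
 * }
 * @endcode
 */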
- * - * @param unit The identifier of the target unit - * @param color The target LED color - * - * @return - * - \ref NVML_SUCCESS if the LED color has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a unit or \a color is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if this is not an S-class product - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlUnitGetLedState() - */ -nvmlReturn_t DECLDIR nvmlUnitSetLedState(nvmlUnit_t unit, nvmlLedColor_t color); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlDeviceCommands Device Commands - * This chapter describes NVML operations that change the state of the device. - * Each of these requires root/admin access. Non-admin users will see an NVML_ERROR_NO_PERMISSION - * error code when invoking any of these methods. - * @{ - */ -/***************************************************************************************************/ - -/** - * Set the persistence mode for the device. - * - * For all products. - * For Linux only. - * Requires root/admin permissions. - * - * The persistence mode determines whether the GPU driver software is torn down after the last client - * exits. - * - * This operation takes effect immediately. It is not persistent across reboots. After each reboot the - * persistence mode is reset to "Disabled". - * - * See \ref nvmlEnableState_t for available modes. - * - * @param device The identifier of the target device - * @param mode The target persistence mode - * - * @return - * - \ref NVML_SUCCESS if the persistence mode was set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetPersistenceMode() - */ -nvmlReturn_t DECLDIR nvmlDeviceSetPersistenceMode(nvmlDevice_t device, nvmlEnableState_t mode); - -/** - * Set the compute mode for the device. - * - * For all products. - * Requires root/admin permissions. - * - * The compute mode determines whether a GPU can be used for compute operations and whether it can - * be shared across contexts. - * - * This operation takes effect immediately. Under Linux it is not persistent across reboots and - * always resets to "Default". Under windows it is persistent. - * - * Under windows compute mode may only be set to DEFAULT when running in WDDM - * - * See \ref nvmlComputeMode_t for details on available compute modes. 
- * - * @param device The identifier of the target device - * @param mode The target compute mode - * - * @return - * - \ref NVML_SUCCESS if the compute mode was set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetComputeMode() - */ -nvmlReturn_t DECLDIR nvmlDeviceSetComputeMode(nvmlDevice_t device, nvmlComputeMode_t mode); - -/** - * Set the ECC mode for the device. - * - * For Kepler &tm; or newer fully supported devices. - * Only applicable to devices with ECC. - * Requires \a NVML_INFOROM_ECC version 1.0 or higher. - * Requires root/admin permissions. - * - * The ECC mode determines whether the GPU enables its ECC support. - * - * This operation takes effect after the next reboot. - * - * See \ref nvmlEnableState_t for details on available modes. - * - * @param device The identifier of the target device - * @param ecc The target ECC mode - * - * @return - * - \ref NVML_SUCCESS if the ECC mode was set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a ecc is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetEccMode() - */ -nvmlReturn_t DECLDIR nvmlDeviceSetEccMode(nvmlDevice_t device, nvmlEnableState_t ecc); - -/** - * Clear the ECC error and other memory error counts for the device. - * - * For Kepler &tm; or newer fully supported devices. - * Only applicable to devices with ECC. - * Requires \a NVML_INFOROM_ECC version 2.0 or higher to clear aggregate location-based ECC counts. - * Requires \a NVML_INFOROM_ECC version 1.0 or higher to clear all other ECC counts. - * Requires root/admin permissions. - * Requires ECC Mode to be enabled. - * - * Sets all of the specified ECC counters to 0, including both detailed and total counts. - * - * This operation takes effect immediately. - * - * See \ref nvmlMemoryErrorType_t for details on available counter types. - * - * @param device The identifier of the target device - * @param counterType Flag that indicates which type of errors should be cleared. 
- * - * @return - * - \ref NVML_SUCCESS if the error counts were cleared - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a counterType is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see - * - nvmlDeviceGetDetailedEccErrors() - * - nvmlDeviceGetTotalEccErrors() - */ -nvmlReturn_t DECLDIR nvmlDeviceClearEccErrorCounts(nvmlDevice_t device, nvmlEccCounterType_t counterType); - -/** - * Set the driver model for the device. - * - * For Fermi &tm; or newer fully supported devices. - * For windows only. - * Requires root/admin permissions. - * - * On Windows platforms the device driver can run in either WDDM or WDM (TCC) mode. If a display is attached - * to the device it must run in WDDM mode. - * - * It is possible to force the change to WDM (TCC) while the display is still attached with a force flag (nvmlFlagForce). - * This should only be done if the host is subsequently powered down and the display is detached from the device - * before the next reboot. - * - * This operation takes effect after the next reboot. - * - * Windows driver model may only be set to WDDM when running in DEFAULT compute mode. - * - * Change driver model to WDDM is not supported when GPU doesn't support graphics acceleration or - * will not support it after reboot. See \ref nvmlDeviceSetGpuOperationMode. - * - * See \ref nvmlDriverModel_t for details on available driver models. - * See \ref nvmlFlagDefault and \ref nvmlFlagForce - * - * @param device The identifier of the target device - * @param driverModel The target driver model - * @param flags Flags that change the default behavior - * - * @return - * - \ref NVML_SUCCESS if the driver model has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a driverModel is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the platform is not windows or the device does not support this feature - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetDriverModel() - */ -nvmlReturn_t DECLDIR nvmlDeviceSetDriverModel(nvmlDevice_t device, nvmlDriverModel_t driverModel, unsigned int flags); - -/** - * Set clocks that device will lock to. - * - * Sets the clocks that the device will be running at to the value in the range of minGpuClockMHz to maxGpuClockMHz. - * Setting this will supersede application clock values and take effect regardless if a cuda app is running. - * See /ref nvmlDeviceSetApplicationsClocks - * - * Can be used as a setting to request constant performance. - * - * Requires root/admin permissions. - * - * After system reboot or driver reload applications clocks go back to their default value. - * See \ref nvmlDeviceResetGpuLockedClocks. - * - * For newer than Pascal &tm; fully supported devices. 
- * - * @param device The identifier of the target device - * @param minGpuClockMHz Requested minimum gpu clock in MHz - * @param maxGpuClockMHz Requested maximum gpu clock in MHz - * - * @return - * - \ref NVML_SUCCESS if new settings were successfully set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a minGpuClockMHz and \a maxGpuClockMHz - * is not a valid clock combination - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceSetGpuLockedClocks(nvmlDevice_t device, unsigned int minGpuClockMHz, unsigned int maxGpuClockMHz); - -/** - * Resets the gpu clock to the default value - * - * This is the gpu clock that will be used after system reboot or driver reload. - * Default values are idle clocks, but the current values can be changed using \ref nvmlDeviceSetApplicationsClocks. - * - * @see nvmlDeviceSetGpuLockedClocks - * - * For newer than Pascal &tm; fully supported devices. - * - * @param device The identifier of the target device - * - * @return - * - \ref NVML_SUCCESS if new settings were successfully set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceResetGpuLockedClocks(nvmlDevice_t device); - -/** - * Set clocks that applications will lock to. - * - * Sets the clocks that compute and graphics applications will be running at. - * e.g. CUDA driver requests these clocks during context creation which means this property - * defines clocks at which CUDA applications will be running unless some overspec event - * occurs (e.g. over power, over thermal or external HW brake). - * - * Can be used as a setting to request constant performance. - * - * On Pascal and newer hardware, this will automatically disable automatic boosting of clocks. - * - * On K80 and newer Kepler and Maxwell GPUs, users desiring fixed performance should also call - * \ref nvmlDeviceSetAutoBoostedClocksEnabled to prevent clocks from automatically boosting - * above the clock value being set. - * - * For Kepler &tm; or newer non-GeForce fully supported devices and Maxwell or newer GeForce devices. - * Requires root/admin permissions. - * - * See \ref nvmlDeviceGetSupportedMemoryClocks and \ref nvmlDeviceGetSupportedGraphicsClocks - * for details on how to list available clocks combinations. - * - * After system reboot or driver reload applications clocks go back to their default value. - * See \ref nvmlDeviceResetApplicationsClocks. 
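/**
 * A minimal sketch for nvmlDeviceSetGpuLockedClocks() and
 * nvmlDeviceResetGpuLockedClocks() declared above: pin the GPU clock range for a
 * measurement, then restore the default. The 1200-1500 MHz range is an arbitrary
 * illustration; valid ranges are device specific and the call requires root/admin
 * permissions.
 *
 * @code
 * #include <stdio.h>
 * #include <nvml.h>
 *
 * int lock_clocks_for_benchmark(nvmlDevice_t dev)
 * {
 *     nvmlReturn_t st = nvmlDeviceSetGpuLockedClocks(dev, 1200, 1500);
 *     if (st != NVML_SUCCESS) {
 *         fprintf(stderr, "SetGpuLockedClocks failed: %s\n", nvmlErrorString(st));
 *         return -1;
 *     }
 *     // ... run the workload at a fixed clock range ...
 *     return nvmlDeviceResetGpuLockedClocks(dev) == NVML_SUCCESS ? 0 : -1;
 * }
 * @endcode
 */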
- * - * @param device The identifier of the target device - * @param memClockMHz Requested memory clock in MHz - * @param graphicsClockMHz Requested graphics clock in MHz - * - * @return - * - \ref NVML_SUCCESS if new settings were successfully set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a memClockMHz and \a graphicsClockMHz - * is not a valid clock combination - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceSetApplicationsClocks(nvmlDevice_t device, unsigned int memClockMHz, unsigned int graphicsClockMHz); - -/** - * Set new power limit of this device. - * - * For Kepler &tm; or newer fully supported devices. - * Requires root/admin permissions. - * - * See \ref nvmlDeviceGetPowerManagementLimitConstraints to check the allowed ranges of values. - * - * \note Limit is not persistent across reboots or driver unloads. - * Enable persistent mode to prevent driver from unloading when no application is using the device. - * - * @param device The identifier of the target device - * @param limit Power management limit in milliwatts to set - * - * @return - * - \ref NVML_SUCCESS if \a limit has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a defaultLimit is out of range - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceGetPowerManagementLimitConstraints - * @see nvmlDeviceGetPowerManagementDefaultLimit - */ -nvmlReturn_t DECLDIR nvmlDeviceSetPowerManagementLimit(nvmlDevice_t device, unsigned int limit); - -/** - * Sets new GOM. See \a nvmlGpuOperationMode_t for details. - * - * For GK110 M-class and X-class Tesla &tm; products from the Kepler family. - * Modes \ref NVML_GOM_LOW_DP and \ref NVML_GOM_ALL_ON are supported on fully supported GeForce products. - * Not supported on Quadro ® and Tesla &tm; C-class products. - * Requires root/admin permissions. - * - * Changing GOMs requires a reboot. - * The reboot requirement might be removed in the future. - * - * Compute only GOMs don't support graphics acceleration. Under windows switching to these GOMs when - * pending driver model is WDDM is not supported. See \ref nvmlDeviceSetDriverModel. 
- * - * @param device The identifier of the target device - * @param mode Target GOM - * - * @return - * - \ref NVML_SUCCESS if \a mode has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a mode incorrect - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support GOM or specific mode - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlGpuOperationMode_t - * @see nvmlDeviceGetGpuOperationMode - */ -nvmlReturn_t DECLDIR nvmlDeviceSetGpuOperationMode(nvmlDevice_t device, nvmlGpuOperationMode_t mode); - -/** - * Changes the root/admin restructions on certain APIs. See \a nvmlRestrictedAPI_t for the list of supported APIs. - * This method can be used by a root/admin user to give non-root/admin access to certain otherwise-restricted APIs. - * The new setting lasts for the lifetime of the NVIDIA driver; it is not persistent. See \a nvmlDeviceGetAPIRestriction - * to query the current restriction settings. - * - * For Kepler &tm; or newer fully supported devices. - * Requires root/admin permissions. - * - * @param device The identifier of the target device - * @param apiType Target API type for this operation - * @param isRestricted The target restriction - * - * @return - * - \ref NVML_SUCCESS if \a isRestricted has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a apiType incorrect - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support changing API restrictions or the device does not support - * the feature that api restrictions are being set for (E.G. Enabling/disabling auto - * boosted clocks is not supported by the device) - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlRestrictedAPI_t - */ -nvmlReturn_t DECLDIR nvmlDeviceSetAPIRestriction(nvmlDevice_t device, nvmlRestrictedAPI_t apiType, nvmlEnableState_t isRestricted); - -/** - * @} - */ - -/** @addtogroup nvmlAccountingStats - * @{ - */ - -/** - * Enables or disables per process accounting. - * - * For Kepler &tm; or newer fully supported devices. - * Requires root/admin permissions. - * - * @note This setting is not persistent and will default to disabled after driver unloads. - * Enable persistence mode to be sure the setting doesn't switch off to disabled. - * - * @note Enabling accounting mode has no negative impact on the GPU performance. - * - * @note Disabling accounting clears all accounting pids information. 
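/**
 * A minimal sketch for nvmlDeviceSetAPIRestriction() declared above, paired with
 * the matching nvmlDeviceGetAPIRestriction() query: lift the root-only
 * restriction on application clock changes and verify the new setting.
 * NVML_RESTRICTED_API_SET_APPLICATION_CLOCKS is assumed to be one of the
 * nvmlRestrictedAPI_t values defined elsewhere in this header; the call itself
 * requires root/admin permissions.
 *
 * @code
 * #include <nvml.h>
 *
 * int allow_user_app_clocks(nvmlDevice_t dev)
 * {
 *     nvmlReturn_t st = nvmlDeviceSetAPIRestriction(
 *         dev, NVML_RESTRICTED_API_SET_APPLICATION_CLOCKS, NVML_FEATURE_DISABLED);
 *     if (st != NVML_SUCCESS)
 *         return -1;
 *     nvmlEnableState_t restricted = NVML_FEATURE_ENABLED;
 *     st = nvmlDeviceGetAPIRestriction(
 *         dev, NVML_RESTRICTED_API_SET_APPLICATION_CLOCKS, &restricted);
 *     return (st == NVML_SUCCESS && restricted == NVML_FEATURE_DISABLED) ? 0 : -1;
 * }
 * @endcode
 */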
- * - * See \ref nvmlDeviceGetAccountingMode - * See \ref nvmlDeviceGetAccountingStats - * See \ref nvmlDeviceClearAccountingPids - * - * @param device The identifier of the target device - * @param mode The target accounting mode - * - * @return - * - \ref NVML_SUCCESS if the new mode has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a mode are invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceSetAccountingMode(nvmlDevice_t device, nvmlEnableState_t mode); - -/** - * Clears accounting information about all processes that have already terminated. - * - * For Kepler &tm; or newer fully supported devices. - * Requires root/admin permissions. - * - * See \ref nvmlDeviceGetAccountingMode - * See \ref nvmlDeviceGetAccountingStats - * See \ref nvmlDeviceSetAccountingMode - * - * @param device The identifier of the target device - * - * @return - * - \ref NVML_SUCCESS if accounting information has been cleared - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device are invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceClearAccountingPids(nvmlDevice_t device); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup NvLink NvLink Methods - * This chapter describes methods that NVML can perform on NVLINK enabled devices. - * @{ - */ -/***************************************************************************************************/ - -/** - * Retrieves the state of the device's NvLink for the link specified - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be queried - * @param isActive \a nvmlEnableState_t where NVML_FEATURE_ENABLED indicates that - * the link is active and NVML_FEATURE_DISABLED indicates it - * is inactive - * - * @return - * - \ref NVML_SUCCESS if \a isActive has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid or \a isActive is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkState(nvmlDevice_t device, unsigned int link, nvmlEnableState_t *isActive); - -/** - * Retrieves the version of the device's NvLink for the link specified - * - * For Pascal &tm; or newer fully supported devices. 
- * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be queried - * @param version Requested NvLink version - * - * @return - * - \ref NVML_SUCCESS if \a version has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid or \a version is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkVersion(nvmlDevice_t device, unsigned int link, unsigned int *version); - -/** - * Retrieves the requested capability from the device's NvLink for the link specified - * Please refer to the \a nvmlNvLinkCapability_t structure for the specific caps that can be queried - * The return value should be treated as a boolean. - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be queried - * @param capability Specifies the \a nvmlNvLinkCapability_t to be queried - * @param capResult A boolean for the queried capability indicating that feature is available - * - * @return - * - \ref NVML_SUCCESS if \a capResult has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, or \a capability is invalid or \a capResult is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkCapability(nvmlDevice_t device, unsigned int link, - nvmlNvLinkCapability_t capability, unsigned int *capResult); - -/** - * Retrieves the PCI information for the remote node on a NvLink link - * Note: pciSubSystemId is not filled in this function and is indeterminate - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be queried - * @param pci \a nvmlPciInfo_t of the remote node for the specified link - * - * @return - * - \ref NVML_SUCCESS if \a pci has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid or \a pci is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkRemotePciInfo(nvmlDevice_t device, unsigned int link, nvmlPciInfo_t *pci); - -/** - * Retrieves the specified error counter value - * Please refer to \a nvmlNvLinkErrorCounter_t for error counters that are available - * - * For Pascal &tm; or newer fully supported devices. 
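/**
 * A minimal sketch walking the NvLink links of a device with
 * nvmlDeviceGetNvLinkState() and nvmlDeviceGetNvLinkVersion() declared above.
 * NVML_NVLINK_MAX_LINKS is assumed to be the per-device link count defined
 * elsewhere in this header; inactive or unsupported links are simply skipped.
 *
 * @code
 * #include <stdio.h>
 * #include <nvml.h>
 *
 * void dump_nvlink_state(nvmlDevice_t dev)
 * {
 *     for (unsigned int link = 0; link < NVML_NVLINK_MAX_LINKS; ++link) {
 *         nvmlEnableState_t active;
 *         if (nvmlDeviceGetNvLinkState(dev, link, &active) != NVML_SUCCESS ||
 *             active != NVML_FEATURE_ENABLED)
 *             continue;
 *         unsigned int version = 0;
 *         if (nvmlDeviceGetNvLinkVersion(dev, link, &version) == NVML_SUCCESS)
 *             printf("link %u: active, NvLink version %u\n", link, version);
 *     }
 * }
 * @endcode
 */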
- * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be queried - * @param counter Specifies the NvLink counter to be queried - * @param counterValue Returned counter value - * - * @return - * - \ref NVML_SUCCESS if \a counter has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, or \a counter is invalid or \a counterValue is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkErrorCounter(nvmlDevice_t device, unsigned int link, - nvmlNvLinkErrorCounter_t counter, unsigned long long *counterValue); - -/** - * Resets all error counters to zero - * Please refer to \a nvmlNvLinkErrorCounter_t for the list of error counters that are reset - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be queried - * - * @return - * - \ref NVML_SUCCESS if the reset is successful - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceResetNvLinkErrorCounters(nvmlDevice_t device, unsigned int link); - -/** - * Set the NVLINK utilization counter control information for the specified counter, 0 or 1. - * Please refer to \a nvmlNvLinkUtilizationControl_t for the structure definition. Performs a reset - * of the counters if the reset parameter is non-zero. - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param counter Specifies the counter that should be set (0 or 1). - * @param link Specifies the NvLink link to be queried - * @param control A reference to the \a nvmlNvLinkUtilizationControl_t to set - * @param reset Resets the counters on set if non-zero - * - * @return - * - \ref NVML_SUCCESS if the control has been set successfully - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a counter, \a link, or \a control is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceSetNvLinkUtilizationControl(nvmlDevice_t device, unsigned int link, unsigned int counter, - nvmlNvLinkUtilizationControl_t *control, unsigned int reset); - -/** - * Get the NVLINK utilization counter control information for the specified counter, 0 or 1. - * Please refer to \a nvmlNvLinkUtilizationControl_t for the structure definition - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param counter Specifies the counter that should be set (0 or 1). 
- * @param link Specifies the NvLink link to be queried - * @param control A reference to the \a nvmlNvLinkUtilizationControl_t to place information - * - * @return - * - \ref NVML_SUCCESS if the control has been set successfully - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a counter, \a link, or \a control is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkUtilizationControl(nvmlDevice_t device, unsigned int link, unsigned int counter, - nvmlNvLinkUtilizationControl_t *control); - - -/** - * Retrieve the NVLINK utilization counter based on the current control for a specified counter. - * In general it is good practice to use \a nvmlDeviceSetNvLinkUtilizationControl - * before reading the utilization counters as they have no default state - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be queried - * @param counter Specifies the counter that should be read (0 or 1). - * @param rxcounter Receive counter return value - * @param txcounter Transmit counter return value - * - * @return - * - \ref NVML_SUCCESS if \a rxcounter and \a txcounter have been successfully set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a counter, or \a link is invalid or \a rxcounter or \a txcounter are NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkUtilizationCounter(nvmlDevice_t device, unsigned int link, unsigned int counter, - unsigned long long *rxcounter, unsigned long long *txcounter); - -/** - * Freeze the NVLINK utilization counters - * Both the receive and transmit counters are operated on by this function - * - * For Pascal &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be queried - * @param counter Specifies the counter that should be frozen (0 or 1). - * @param freeze NVML_FEATURE_ENABLED = freeze the receive and transmit counters - * NVML_FEATURE_DISABLED = unfreeze the receive and transmit counters - * - * @return - * - \ref NVML_SUCCESS if counters were successfully frozen or unfrozen - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, \a counter, or \a freeze is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceFreezeNvLinkUtilizationCounter (nvmlDevice_t device, unsigned int link, - unsigned int counter, nvmlEnableState_t freeze); - -/** - * Reset the NVLINK utilization counters - * Both the receive and transmit counters are operated on by this function - * - * For Pascal &tm; or newer fully supported devices. 
- * - * @param device The identifier of the target device - * @param link Specifies the NvLink link to be reset - * @param counter Specifies the counter that should be reset (0 or 1) - * - * @return - * - \ref NVML_SUCCESS if counters were successfully reset - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device, \a link, or \a counter is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceResetNvLinkUtilizationCounter (nvmlDevice_t device, unsigned int link, unsigned int counter); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlEvents Event Handling Methods - * This chapter describes methods that NVML can perform against each device to register and wait for - * some event to occur. - * @{ - */ -/***************************************************************************************************/ - -/** - * Create an empty set of events. - * Event set should be freed by \ref nvmlEventSetFree - * - * For Fermi &tm; or newer fully supported devices. - * @param set Reference in which to return the event handle - * - * @return - * - \ref NVML_SUCCESS if the event has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a set is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlEventSetFree - */ -nvmlReturn_t DECLDIR nvmlEventSetCreate(nvmlEventSet_t *set); - -/** - * Starts recording of events on a specified devices and add the events to specified \ref nvmlEventSet_t - * - * For Fermi &tm; or newer fully supported devices. - * Ecc events are available only on ECC enabled devices (see \ref nvmlDeviceGetTotalEccErrors) - * Power capping events are available only on Power Management enabled devices (see \ref nvmlDeviceGetPowerManagementMode) - * - * For Linux only. - * - * \b IMPORTANT: Operations on \a set are not thread safe - * - * This call starts recording of events on specific device. - * All events that occurred before this call are not recorded. - * Checking if some event occurred can be done with \ref nvmlEventSetWait - * - * If function reports NVML_ERROR_UNKNOWN, event set is in undefined state and should be freed. - * If function reports NVML_ERROR_NOT_SUPPORTED, event set can still be used. None of the requested eventTypes - * are registered in that case. 
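/**
 * A minimal sketch configuring, reading and resetting NvLink utilization
 * counter 0 with the functions declared above. The
 * nvmlNvLinkUtilizationControl_t field names (units, pktfilter) and the
 * NVML_NVLINK_COUNTER_UNIT_BYTES / NVML_NVLINK_COUNTER_PKTFILTER_ALL values are
 * assumptions based on the definitions elsewhere in this header.
 *
 * @code
 * #include <stdio.h>
 * #include <string.h>
 * #include <nvml.h>
 *
 * void sample_nvlink_traffic(nvmlDevice_t dev, unsigned int link)
 * {
 *     nvmlNvLinkUtilizationControl_t ctl;
 *     memset(&ctl, 0, sizeof(ctl));
 *     ctl.units = NVML_NVLINK_COUNTER_UNIT_BYTES;        // assumed enum value
 *     ctl.pktfilter = NVML_NVLINK_COUNTER_PKTFILTER_ALL; // assumed enum value
 *
 *     // Program counter 0 and reset it so the next read starts from zero.
 *     if (nvmlDeviceSetNvLinkUtilizationControl(dev, link, 0, &ctl, 1) != NVML_SUCCESS)
 *         return;
 *     // ... let traffic flow for the measurement window ...
 *     unsigned long long rx = 0, tx = 0;
 *     if (nvmlDeviceGetNvLinkUtilizationCounter(dev, link, 0, &rx, &tx) == NVML_SUCCESS)
 *         printf("link %u: rx %llu, tx %llu\n", link, rx, tx);
 *     nvmlDeviceResetNvLinkUtilizationCounter(dev, link, 0);
 * }
 * @endcode
 */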
- * - * @param device The identifier of the target device - * @param eventTypes Bitmask of \ref nvmlEventType to record - * @param set Set to which add new event types - * - * @return - * - \ref NVML_SUCCESS if the event has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a eventTypes is invalid or \a set is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the platform does not support this feature or some of requested event types - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlEventType - * @see nvmlDeviceGetSupportedEventTypes - * @see nvmlEventSetWait - * @see nvmlEventSetFree - */ -nvmlReturn_t DECLDIR nvmlDeviceRegisterEvents(nvmlDevice_t device, unsigned long long eventTypes, nvmlEventSet_t set); - -/** - * Returns information about events supported on device - * - * For Fermi &tm; or newer fully supported devices. - * - * Events are not supported on Windows. So this function returns an empty mask in \a eventTypes on Windows. - * - * @param device The identifier of the target device - * @param eventTypes Reference in which to return bitmask of supported events - * - * @return - * - \ref NVML_SUCCESS if the eventTypes has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a eventType is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlEventType - * @see nvmlDeviceRegisterEvents - */ -nvmlReturn_t DECLDIR nvmlDeviceGetSupportedEventTypes(nvmlDevice_t device, unsigned long long *eventTypes); - -/** - * Waits on events and delivers events - * - * For Fermi &tm; or newer fully supported devices. - * - * If some events are ready to be delivered at the time of the call, function returns immediately. - * If there are no events ready to be delivered, function sleeps till event arrives - * but not longer than specified timeout. This function in certain conditions can return before - * specified timeout passes (e.g. when interrupt arrives) - * - * In case of xid error, the function returns the most recent xid error type seen by the system. If there are multiple - * xid errors generated before nvmlEventSetWait is invoked then the last seen xid error type is returned for all - * xid error events. - * - * @param set Reference to set of events to wait on - * @param data Reference in which to return event data - * @param timeoutms Maximum amount of wait time in milliseconds for registered event - * - * @return - * - \ref NVML_SUCCESS if the data has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a data is NULL - * - \ref NVML_ERROR_TIMEOUT if no event arrived in specified timeout or interrupt arrived - * - \ref NVML_ERROR_GPU_IS_LOST if a GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlEventType - * @see nvmlDeviceRegisterEvents - */ -nvmlReturn_t DECLDIR nvmlEventSetWait(nvmlEventSet_t set, nvmlEventData_t * data, unsigned int timeoutms); - -/** - * Releases events in the set - * - * For Fermi &tm; or newer fully supported devices. 
- * - * @param set Reference to events to be released - * - * @return - * - \ref NVML_SUCCESS if the event has been successfully released - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlDeviceRegisterEvents - */ -nvmlReturn_t DECLDIR nvmlEventSetFree(nvmlEventSet_t set); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlZPI Drain states - * This chapter describes methods that NVML can perform against each device to control their drain state - * and recognition by NVML and NVIDIA kernel driver. These methods can be used with out-of-band tools to - * power on/off GPUs, enable robust reset scenarios, etc. - * @{ - */ -/***************************************************************************************************/ - -/** - * Modify the drain state of a GPU. This method forces a GPU to no longer accept new incoming requests. - * Any new NVML process will no longer see this GPU. Persistence mode for this GPU must be turned off before - * this call is made. - * Must be called as administrator. - * For Linux only. - * - * For Pascal &tm; or newer fully supported devices. - * Some Kepler devices supported. - * - * @param pciInfo The PCI address of the GPU drain state to be modified - * @param newState The drain state that should be entered, see \ref nvmlEnableState_t - * - * @return - * - \ref NVML_SUCCESS if counters were successfully reset - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a nvmlIndex or \a newState is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_NO_PERMISSION if the calling process has insufficient permissions to perform operation - * - \ref NVML_ERROR_IN_USE if the device has persistence mode turned on - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceModifyDrainState (nvmlPciInfo_t *pciInfo, nvmlEnableState_t newState); - -/** - * Query the drain state of a GPU. This method is used to check if a GPU is in a currently draining - * state. - * For Linux only. - * - * For Pascal &tm; or newer fully supported devices. - * Some Kepler devices supported. - * - * @param pciInfo The PCI address of the GPU drain state to be queried - * @param currentState The current drain state for this GPU, see \ref nvmlEnableState_t - * - * @return - * - \ref NVML_SUCCESS if counters were successfully reset - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a nvmlIndex or \a currentState is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceQueryDrainState (nvmlPciInfo_t *pciInfo, nvmlEnableState_t *currentState); - -/** - * This method will remove the specified GPU from the view of both NVML and the NVIDIA kernel driver - * as long as no other processes are attached. If other processes are attached, this call will return - * NVML_ERROR_IN_USE and the GPU will be returned to its original "draining" state. 
Note: the - * only situation where a process can still be attached after nvmlDeviceModifyDrainState() is called - * to initiate the draining state is if that process was using, and is still using, a GPU before the - * call was made. Also note, persistence mode counts as an attachment to the GPU thus it must be disabled - * prior to this call. - * - * For long-running NVML processes please note that this will change the enumeration of current GPUs. - * For example, if there are four GPUs present and GPU1 is removed, the new enumeration will be 0-2. - * Also, device handles after the removed GPU will not be valid and must be re-established. - * Must be run as administrator. - * For Linux only. - * - * For Pascal &tm; or newer fully supported devices. - * Some Kepler devices supported. - * - * @param pciInfo The PCI address of the GPU to be removed - * @param gpuState Whether the GPU is to be removed, from the OS - * see \ref nvmlDetachGpuState_t - * @param linkState Requested upstream PCIe link state, see \ref nvmlPcieLinkState_t - * - * @return - * - \ref NVML_SUCCESS if counters were successfully reset - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a nvmlIndex is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature - * - \ref NVML_ERROR_IN_USE if the device is still in use and cannot be removed - */ -nvmlReturn_t DECLDIR nvmlDeviceRemoveGpu (nvmlPciInfo_t *pciInfo, nvmlDetachGpuState_t gpuState, nvmlPcieLinkState_t linkState); - -/** - * Request the OS and the NVIDIA kernel driver to rediscover a portion of the PCI subsystem looking for GPUs that - * were previously removed. The portion of the PCI tree can be narrowed by specifying a domain, bus, and device. - * If all are zeroes then the entire PCI tree will be searched. Please note that for long-running NVML processes - * the enumeration will change based on how many GPUs are discovered and where they are inserted in bus order. - * - * In addition, all newly discovered GPUs will be initialized and their ECC scrubbed which may take several seconds - * per GPU. Also, all device handles are no longer guaranteed to be valid post discovery. - * - * Must be run as administrator. - * For Linux only. - * - * For Pascal &tm; or newer fully supported devices. - * Some Kepler devices supported. - * - * @param pciInfo The PCI tree to be searched. Only the domain, bus, and device - * fields are used in this call. 
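A hedged sketch of the drain-state flow documented above, not part of the original header: it assumes administrator privileges on Linux, persistence mode already disabled, and that nvmlDeviceGetPciInfo and the NVML_FEATURE_ENABLED value of nvmlEnableState_t are available as declared elsewhere in this header.

#include <stdio.h>
#include <nvml.h>

/* Put a GPU into the draining state and read the state back.
 * Linux only; must be run as administrator with persistence mode off. */
static void drain_gpu(nvmlDevice_t device)
{
    nvmlPciInfo_t pciInfo;
    nvmlEnableState_t state;

    /* nvmlDeviceGetPciInfo and NVML_FEATURE_ENABLED come from elsewhere in this header. */
    if (nvmlDeviceGetPciInfo(device, &pciInfo) != NVML_SUCCESS)
        return;
    if (nvmlDeviceModifyDrainState(&pciInfo, NVML_FEATURE_ENABLED) != NVML_SUCCESS)
        return;
    if (nvmlDeviceQueryDrainState(&pciInfo, &state) == NVML_SUCCESS)
        printf("draining: %s\n", state == NVML_FEATURE_ENABLED ? "yes" : "no");
}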
- * - * @return - * - \ref NVML_SUCCESS if counters were successfully reset - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pciInfo is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if the operating system does not support this feature - * - \ref NVML_ERROR_OPERATING_SYSTEM if the operating system is denying this feature - * - \ref NVML_ERROR_NO_PERMISSION if the calling process has insufficient permissions to perform operation - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceDiscoverGpus (nvmlPciInfo_t *pciInfo); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlFieldValueQueries Field Value Queries - * This chapter describes NVML operations that are associated with retrieving Field Values from NVML - * @{ - */ -/***************************************************************************************************/ - -/** - * Request values for a list of fields for a device. This API allows multiple fields to be queried at once. - * If any of the underlying fieldIds are populated by the same driver call, the results for those field IDs - * will be populated from a single call rather than making a driver call for each fieldId. - * - * @param device The device handle of the GPU to request field values for - * @param valuesCount Number of entries in values that should be retrieved - * @param values Array of \a valuesCount structures to hold field values. - * Each value's fieldId must be populated prior to this call - * - * @return - * - \ref NVML_SUCCESS if any values in \a values were populated. Note that you must - * check the nvmlReturn field of each value for each individual - * status - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a values is NULL - */ -nvmlReturn_t DECLDIR nvmlDeviceGetFieldValues(nvmlDevice_t device, int valuesCount, nvmlFieldValue_t *values); - - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlGridQueries Grid Queries - * This chapter describes NVML operations that are associated with NVIDIA GRID products. - * @{ - */ -/***************************************************************************************************/ - -/** - * This method is used to get the virtualization mode corresponding to the GPU. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device Identifier of the target device - * @param pVirtualMode Reference to virtualization mode. One of NVML_GPU_VIRTUALIZATION_? - * - * @return - * - \ref NVML_SUCCESS if \a pVirtualMode is fetched - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pVirtualMode is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetVirtualizationMode(nvmlDevice_t device, nvmlGpuVirtualizationMode_t *pVirtualMode); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlGridCommands Grid Commands - * This chapter describes NVML operations that are associated with NVIDIA GRID products. 
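The field-value API above batches multiple queries into one driver call and reports a per-entry status in each value's `nvmlReturn` field. A short sketch, not part of the original header: the two NVML_FI_* identifiers are purely illustrative placeholders for field ids defined elsewhere in this header, and `device` is assumed to be a valid handle.

#include <stdio.h>
#include <nvml.h>

/* Batch two field queries into one call and check each entry's own status. */
static void read_field_values(nvmlDevice_t device)
{
    nvmlFieldValue_t values[2] = {{0}};
    values[0].fieldId = NVML_FI_DEV_ECC_SBE_VOL_TOTAL; /* illustrative NVML_FI_* ids */
    values[1].fieldId = NVML_FI_DEV_ECC_DBE_VOL_TOTAL;

    if (nvmlDeviceGetFieldValues(device, 2, values) == NVML_SUCCESS) {
        for (int i = 0; i < 2; ++i)
            printf("field %u per-entry status %d\n",
                   values[i].fieldId, (int)values[i].nvmlReturn);
    }
}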
- * @{ - */ -/***************************************************************************************************/ - -/** - * This method is used to set the virtualization mode corresponding to the GPU. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device Identifier of the target device - * @param virtualMode virtualization mode. One of NVML_GPU_VIRTUALIZATION_? - * - * @return - * - \ref NVML_SUCCESS if \a pVirtualMode is set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pVirtualMode is NULL - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_NOT_SUPPORTED if setting of virtualization mode is not supported. - * - \ref NVML_ERROR_NO_PERMISSION if setting of virtualization mode is not allowed for this client. - */ -nvmlReturn_t DECLDIR nvmlDeviceSetVirtualizationMode(nvmlDevice_t device, nvmlGpuVirtualizationMode_t virtualMode); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlVgpu vGPU Management - * @{ - * - * Set of APIs supporting GRID vGPU - */ -/***************************************************************************************************/ - -/** - * Retrieve the supported vGPU types on a physical GPU (device). - * - * An array of supported vGPU types for the physical GPU indicated by \a device is returned in the caller-supplied buffer - * pointed at by \a vgpuTypeIds. The element count of nvmlVgpuTypeId_t array is passed in \a vgpuCount, and \a vgpuCount - * is used to return the number of vGPU types written to the buffer. - * - * If the supplied buffer is not large enough to accommodate the vGPU type array, the function returns - * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuTypeId_t array required in \a vgpuCount. - * To query the number of vGPU types supported for the GPU, call this function with *vgpuCount = 0. - * The code will return NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU types are supported. - * - * @param device The identifier of the target device - * @param vgpuCount Pointer to caller-supplied array size, and returns number of vGPU types - * @param vgpuTypeIds Pointer to caller-supplied array in which to return list of vGPU types - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_INSUFFICIENT_SIZE \a vgpuTypeIds buffer is too small, array element count is returned in \a vgpuCount - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuCount is NULL or \a device is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device - * - \ref NVML_ERROR_VGPU_ECC_NOT_SUPPORTED if ECC is enabled on the device - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetSupportedVgpus(nvmlDevice_t device, unsigned int *vgpuCount, nvmlVgpuTypeId_t *vgpuTypeIds); - -/** - * Retrieve the currently creatable vGPU types on a physical GPU (device). - * - * An array of creatable vGPU types for the physical GPU indicated by \a device is returned in the caller-supplied buffer - * pointed at by \a vgpuTypeIds. The element count of nvmlVgpuTypeId_t array is passed in \a vgpuCount, and \a vgpuCount - * is used to return the number of vGPU types written to the buffer. 
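The comment above describes the usual two-pass sizing pattern: call once with *vgpuCount set to 0 to learn the required array size, then call again with an allocated buffer. A minimal sketch of that pattern, added for illustration and not part of the original header; it assumes a NULL type-id buffer is acceptable for the sizing call and prints nvmlVgpuTypeId_t values as unsigned int.

#include <stdio.h>
#include <stdlib.h>
#include <nvml.h>

/* Enumerate the vGPU types supported by `device` using the two-pass pattern. */
static void list_supported_vgpu_types(nvmlDevice_t device)
{
    unsigned int count = 0;
    nvmlReturn_t ret = nvmlDeviceGetSupportedVgpus(device, &count, NULL);
    if (ret == NVML_SUCCESS)
        return;                                   /* no vGPU types supported */
    if (ret != NVML_ERROR_INSUFFICIENT_SIZE || count == 0)
        return;                                   /* unexpected error */

    nvmlVgpuTypeId_t *ids = malloc(count * sizeof *ids);
    if (ids && nvmlDeviceGetSupportedVgpus(device, &count, ids) == NVML_SUCCESS) {
        for (unsigned int i = 0; i < count; ++i)
            printf("supported vGPU type id: %u\n", (unsigned int)ids[i]);
    }
    free(ids);
}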
- * - * The creatable vGPU types for a device may differ over time, as there may be restrictions on what type of vGPU types - * can concurrently run on a device. For example, if only one vGPU type is allowed at a time on a device, then the creatable - * list will be restricted to whatever vGPU type is already running on the device. - * - * If the supplied buffer is not large enough to accommodate the vGPU type array, the function returns - * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuTypeId_t array required in \a vgpuCount. - * To query the number of vGPU types creatable for the GPU, call this function with *vgpuCount = 0. - * The code will return NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU types are creatable. - * - * @param device The identifier of the target device - * @param vgpuCount Pointer to caller-supplied array size, and returns number of vGPU types - * @param vgpuTypeIds Pointer to caller-supplied array in which to return list of vGPU types - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_INSUFFICIENT_SIZE \a vgpuTypeIds buffer is too small, array element count is returned in \a vgpuCount - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuCount is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device - * - \ref NVML_ERROR_VGPU_ECC_NOT_SUPPORTED if ECC is enabled on the device - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetCreatableVgpus(nvmlDevice_t device, unsigned int *vgpuCount, nvmlVgpuTypeId_t *vgpuTypeIds); - -/** - * Retrieve the class of a vGPU type. It will not exceed 64 characters in length (including the NUL terminator). - * See \ref nvmlConstants::NVML_DEVICE_NAME_BUFFER_SIZE. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuTypeId Handle to vGPU type - * @param vgpuTypeClass Pointer to string array to return class in - * @param size Size of string - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a vgpuTypeClass is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetClass(nvmlVgpuTypeId_t vgpuTypeId, char *vgpuTypeClass, unsigned int *size); - -/** - * Retrieve the vGPU type name. - * - * The name is an alphanumeric string that denotes a particular vGPU, e.g. GRID M60-2Q. It will not - * exceed 64 characters in length (including the NUL terminator). See \ref - * nvmlConstants::NVML_DEVICE_NAME_BUFFER_SIZE. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuTypeId Handle to vGPU type - * @param vgpuTypeName Pointer to buffer to return name - * @param size Size of buffer - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a name is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetName(nvmlVgpuTypeId_t vgpuTypeId, char *vgpuTypeName, unsigned int *size); - -/** - * Retrieve the device ID of a vGPU type. - * - * For Kepler &tm; or newer fully supported devices. 
- * - * @param vgpuTypeId Handle to vGPU type - * @param deviceID Device ID and vendor ID of the device contained in single 32 bit value - * @param subsystemID subsystem ID and subsystem vendor ID of the device contained in single 32 bit value - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a deviceId or \a subsystemID are NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetDeviceID(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long *deviceID, unsigned long long *subsystemID); - -/** - * Retrieve the vGPU framebuffer size in bytes. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuTypeId Handle to vGPU type - * @param fbSize Pointer to framebuffer size in bytes - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a fbSize is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetFramebufferSize(nvmlVgpuTypeId_t vgpuTypeId, unsigned long long *fbSize); - -/** - * Retrieve count of vGPU's supported display heads. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuTypeId Handle to vGPU type - * @param numDisplayHeads Pointer to number of display heads - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a numDisplayHeads is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetNumDisplayHeads(nvmlVgpuTypeId_t vgpuTypeId, unsigned int *numDisplayHeads); - -/** - * Retrieve vGPU display head's maximum supported resolution. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuTypeId Handle to vGPU type - * @param displayIndex Zero-based index of display head - * @param xdim Pointer to maximum number of pixels in X dimension - * @param ydim Pointer to maximum number of pixels in Y dimension - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a xdim or \a ydim are NULL, or \a displayIndex - * is out of range. - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetResolution(nvmlVgpuTypeId_t vgpuTypeId, unsigned int displayIndex, unsigned int *xdim, unsigned int *ydim); - -/** - * Retrieve license requirements for a vGPU type - * - * The license type and version required to run the specified vGPU type is returned as an alphanumeric string, in the form - * ",", for example "GRID-Virtual-PC,2.0". If a vGPU is runnable with* more than one type of license, - * the licenses are delimited by a semicolon, for example "GRID-Virtual-PC,2.0;GRID-Virtual-WS,2.0;GRID-Virtual-WS-Ext,2.0". - * - * The total length of the returned string will not exceed 128 characters, including the NUL terminator. - * See \ref nvmlVgpuConstants::NVML_GRID_LICENSE_BUFFER_SIZE. - * - * For Kepler &tm; or newer fully supported devices. 
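A small sketch tying together the vGPU-type property getters documented above (name, framebuffer size, display resolution); added for illustration only. It assumes `typeId` was obtained from nvmlDeviceGetSupportedVgpus and uses NVML_DEVICE_NAME_BUFFER_SIZE as the name buffer size referenced above.

#include <stdio.h>
#include <nvml.h>

/* Print a few static properties of one vGPU type. */
static void describe_vgpu_type(nvmlVgpuTypeId_t typeId)
{
    char name[NVML_DEVICE_NAME_BUFFER_SIZE];
    unsigned int size = sizeof(name);
    unsigned long long fbSize = 0;
    unsigned int xdim = 0, ydim = 0;

    if (nvmlVgpuTypeGetName(typeId, name, &size) == NVML_SUCCESS)
        printf("name: %s\n", name);
    if (nvmlVgpuTypeGetFramebufferSize(typeId, &fbSize) == NVML_SUCCESS)
        printf("framebuffer: %llu bytes\n", fbSize);
    if (nvmlVgpuTypeGetResolution(typeId, 0, &xdim, &ydim) == NVML_SUCCESS)
        printf("head 0 max resolution: %ux%u\n", xdim, ydim);
}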
- * - * @param vgpuTypeId Handle to vGPU type - * @param vgpuTypeLicenseString Pointer to buffer to return license info - * @param size Size of \a vgpuTypeLicenseString buffer - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a vgpuTypeLicenseString is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetLicense(nvmlVgpuTypeId_t vgpuTypeId, char *vgpuTypeLicenseString, unsigned int size); - -/** - * Retrieve the static frame rate limit value of the vGPU type - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuTypeId Handle to vGPU type - * @param frameRateLimit Reference to return the frame rate limit value - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_NOT_SUPPORTED if frame rate limiter is turned off for the vGPU type - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a frameRateLimit is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetFrameRateLimit(nvmlVgpuTypeId_t vgpuTypeId, unsigned int *frameRateLimit); - -/** - * Retrieve the maximum number of vGPU instances creatable on a device for given vGPU type - * - * For Kepler &tm; or newer fully supported devices. - * - * @param device The identifier of the target device - * @param vgpuTypeId Handle to vGPU type - * @param vgpuInstanceCount Pointer to get the max number of vGPU instances - * that can be created on a deicve for given vgpuTypeId - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid or is not supported on target device, - * or \a vgpuInstanceCount is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuTypeGetMaxInstances(nvmlDevice_t device, nvmlVgpuTypeId_t vgpuTypeId, unsigned int *vgpuInstanceCount); - -/** - * Retrieve the active vGPU instances on a device. - * - * An array of active vGPU instances is returned in the caller-supplied buffer pointed at by \a vgpuInstances. The - * array element count is passed in \a vgpuCount, and \a vgpuCount is used to return the number of vGPU instances - * written to the buffer. - * - * If the supplied buffer is not large enough to accommodate the vGPU instance array, the function returns - * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlVgpuInstance_t array required in \a vgpuCount. - * To query the number of active vGPU instances, call this function with *vgpuCount = 0. The code will return - * NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if no vGPU Types are supported. - * - * For Kepler &tm; or newer fully supported devices. 
- * - * @param device The identifier of the target device - * @param vgpuCount Pointer which passes in the array size as well as get - * back the number of types - * @param vgpuInstances Pointer to array in which to return list of vGPU instances - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a vgpuCount is NULL - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small - * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetActiveVgpus(nvmlDevice_t device, unsigned int *vgpuCount, nvmlVgpuInstance_t *vgpuInstances); - -/** - * Retrieve the VM ID associated with a vGPU instance. - * - * The VM ID is returned as a string, not exceeding 80 characters in length (including the NUL terminator). - * See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE. - * - * The format of the VM ID varies by platform, and is indicated by the type identifier returned in \a vmIdType. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuInstance Identifier of the target vGPU instance - * @param vmId Pointer to caller-supplied buffer to hold VM ID - * @param size Size of buffer in bytes - * @param vmIdType Pointer to hold VM ID type - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vmId or \a vmIdType is NULL, or \a vgpuInstance is 0 - * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetVmID(nvmlVgpuInstance_t vgpuInstance, char *vmId, unsigned int size, nvmlVgpuVmIdType_t *vmIdType); - -/** - * Retrieve the UUID of a vGPU instance. - * - * The UUID is a globally unique identifier associated with the vGPU, and is returned as a 5-part hexadecimal string, - * not exceeding 80 characters in length (including the NULL terminator). - * See \ref nvmlConstants::NVML_DEVICE_UUID_BUFFER_SIZE. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuInstance Identifier of the target vGPU instance - * @param uuid Pointer to caller-supplied buffer to hold vGPU UUID - * @param size Size of buffer in bytes - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a uuid is NULL - * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a size is too small - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetUUID(nvmlVgpuInstance_t vgpuInstance, char *uuid, unsigned int size); - -/** - * Retrieve the NVIDIA driver version installed in the VM associated with a vGPU. - * - * The version is returned as an alphanumeric string in the caller-supplied buffer \a version. The length of the version - * string will not exceed 80 characters in length (including the NUL terminator). - * See \ref nvmlConstants::NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE. 
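A minimal sketch combining nvmlDeviceGetActiveVgpus with nvmlVgpuInstanceGetUUID, following the same sizing pattern documented above; added for illustration and not part of the original header. It assumes a NULL instance buffer is acceptable for the sizing call and uses NVML_DEVICE_UUID_BUFFER_SIZE as referenced above.

#include <stdio.h>
#include <stdlib.h>
#include <nvml.h>

/* Enumerate active vGPU instances on `device` and print each instance's UUID. */
static void list_active_vgpus(nvmlDevice_t device)
{
    unsigned int count = 0;
    nvmlReturn_t ret = nvmlDeviceGetActiveVgpus(device, &count, NULL);
    if (ret != NVML_ERROR_INSUFFICIENT_SIZE || count == 0)
        return;                                   /* none active, or error */

    nvmlVgpuInstance_t *instances = malloc(count * sizeof *instances);
    if (instances && nvmlDeviceGetActiveVgpus(device, &count, instances) == NVML_SUCCESS) {
        for (unsigned int i = 0; i < count; ++i) {
            char uuid[NVML_DEVICE_UUID_BUFFER_SIZE];
            if (nvmlVgpuInstanceGetUUID(instances[i], uuid, sizeof(uuid)) == NVML_SUCCESS)
                printf("vGPU instance %u uuid %s\n", (unsigned int)instances[i], uuid);
        }
    }
    free(instances);
}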
- * - * nvmlVgpuInstanceGetVmDriverVersion() may be called at any time for a vGPU instance. The guest VM driver version is - * returned as "Unknown" if no NVIDIA driver is installed in the VM, or the VM has not yet booted to the point where the - * NVIDIA driver is loaded and initialized. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuInstance Identifier of the target vGPU instance - * @param version Caller-supplied buffer to return driver version string - * @param length Size of \a version buffer - * - * @return - * - \ref NVML_SUCCESS if \a version has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0 - * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetVmDriverVersion(nvmlVgpuInstance_t vgpuInstance, char* version, unsigned int length); - -/** - * Retrieve the framebuffer usage in bytes. - * - * Framebuffer usage is the amont of vGPU framebuffer memory that is currently in use by the VM. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuInstance The identifier of the target instance - * @param fbUsage Pointer to framebuffer usage in bytes - * - * @return - * - \ref NVML_SUCCESS successful completion - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a fbUsage is NULL - * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetFbUsage(nvmlVgpuInstance_t vgpuInstance, unsigned long long *fbUsage); - -/** - * Retrieve the current licensing state of the vGPU instance. - * - * If the vGPU is currently licensed, \a licensed is set to 1, otherwise it is set to 0. - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuInstance Identifier of the target vGPU instance - * @param licensed Reference to return the licensing status - * - * @return - * - \ref NVML_SUCCESS if \a licensed has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a licensed is NULL - * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetLicenseStatus(nvmlVgpuInstance_t vgpuInstance, unsigned int *licensed); - -/** - * Retrieve the vGPU type of a vGPU instance. - * - * Returns the vGPU type ID of vgpu assigned to the vGPU instance. - * - * For Kepler &tm; or newer fully supported devices. 
- * - * @param vgpuInstance Identifier of the target vGPU instance - * @param vgpuTypeId Reference to return the vgpuTypeId - * - * @return - * - \ref NVML_SUCCESS if \a vgpuTypeId has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a vgpuTypeId is NULL - * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetType(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuTypeId_t *vgpuTypeId); - -/** - * Retrieve the frame rate limit set for the vGPU instance. - * - * Returns the value of the frame rate limit set for the vGPU instance - * - * For Kepler &tm; or newer fully supported devices. - * - * @param vgpuInstance Identifier of the target vGPU instance - * @param frameRateLimit Reference to return the frame rate limit - * - * @return - * - \ref NVML_SUCCESS if \a frameRateLimit has been set - * - \ref NVML_ERROR_NOT_SUPPORTED if frame rate limiter is turned off for the vGPU type - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a frameRateLimit is NULL - * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetFrameRateLimit(nvmlVgpuInstance_t vgpuInstance, unsigned int *frameRateLimit); - -/** - * Retrieve the encoder capacity of a vGPU instance, as a percentage of maximum encoder capacity with valid values in the range 0-100. - * - * For Maxwell &tm; or newer fully supported devices. - * - * @param vgpuInstance Identifier of the target vGPU instance - * @param encoderCapacity Reference to an unsigned int for the encoder capacity - * - * @return - * - \ref NVML_SUCCESS if \a encoderCapacity has been retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a encoderQueryType is invalid - * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetEncoderCapacity(nvmlVgpuInstance_t vgpuInstance, unsigned int *encoderCapacity); - -/** - * Set the encoder capacity of a vGPU instance, as a percentage of maximum encoder capacity with valid values in the range 0-100. - * - * For Maxwell &tm; or newer fully supported devices. - * - * @param vgpuInstance Identifier of the target vGPU instance - * @param encoderCapacity Unsigned int for the encoder capacity value - * - * @return - * - \ref NVML_SUCCESS if \a encoderCapacity has been set - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0 - * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceSetEncoderCapacity(nvmlVgpuInstance_t vgpuInstance, unsigned int encoderCapacity); - -/** - * Retrieves current utilization for vGPUs on a physical GPU (device). - * - * For Kepler &tm; or newer fully supported devices. 
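A short sketch of the encoder-capacity getter/setter pair documented above; illustrative only, not part of the original header. It assumes `vgpuInstance` is an active instance and that the caller has permission to change the capacity.

#include <stdio.h>
#include <nvml.h>

/* Read the current encoder capacity and, if it is above 50%, cap it at 50%. */
static void limit_encoder_capacity(nvmlVgpuInstance_t vgpuInstance)
{
    unsigned int capacity = 0;
    if (nvmlVgpuInstanceGetEncoderCapacity(vgpuInstance, &capacity) != NVML_SUCCESS)
        return;
    printf("current encoder capacity: %u%%\n", capacity);

    if (capacity > 50 &&
        nvmlVgpuInstanceSetEncoderCapacity(vgpuInstance, 50) == NVML_SUCCESS)
        printf("encoder capacity reduced to 50%%\n");
}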
- * - * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for vGPU instances running - * on a device. Utilization values are returned as an array of utilization sample structures in the caller-supplied buffer - * pointed at by \a utilizationSamples. One utilization sample structure is returned per vGPU instance, and includes the - * CPU timestamp at which the samples were recorded. Individual utilization values are returned as "unsigned int" values - * in nvmlValue_t unions. The function sets the caller-supplied \a sampleValType to NVML_VALUE_TYPE_UNSIGNED_INT to - * indicate the returned value type. - * - * To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with - * \a utilizationSamples set to NULL. The function will return NVML_ERROR_INSUFFICIENT_SIZE, with the current vGPU instance - * count in \a vgpuInstanceSamplesCount, or NVML_SUCCESS if the current vGPU instance count is zero. The caller should allocate - * a buffer of size vgpuInstanceSamplesCount * sizeof(nvmlVgpuInstanceUtilizationSample_t). Invoke the function again with - * the allocated buffer passed in \a utilizationSamples, and \a vgpuInstanceSamplesCount set to the number of entries the - * buffer is sized for. - * - * On successful return, the function updates \a vgpuInstanceSampleCount with the number of vGPU utilization sample - * structures that were actually written. This may differ from a previously read value as vGPU instances are created or - * destroyed. - * - * lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0 - * to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp - * to a timeStamp retrieved from a previous query to read utilization since the previous query. - * - * @param device The identifier for the target device - * @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp. 
- * @param sampleValType Pointer to caller-supplied buffer to hold the type of returned sample values - * @param vgpuInstanceSamplesCount Pointer to caller-supplied array size, and returns number of vGPU instances - * @param utilizationSamples Pointer to caller-supplied buffer in which vGPU utilization samples are returned - - * @return - * - \ref NVML_SUCCESS if utilization samples are successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a vgpuInstanceSamplesCount or \a sampleValType is - * NULL, or a sample count of 0 is passed with a non-NULL \a utilizationSamples - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if supplied \a vgpuInstanceSamplesCount is too small to return samples for all - * vGPU instances currently executing on the device - * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_NOT_FOUND if sample entries are not found - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetVgpuUtilization(nvmlDevice_t device, unsigned long long lastSeenTimeStamp, - nvmlValueType_t *sampleValType, unsigned int *vgpuInstanceSamplesCount, - nvmlVgpuInstanceUtilizationSample_t *utilizationSamples); - -/** - * Retrieves current utilization for processes running on vGPUs on a physical GPU (device). - * - * For Maxwell &tm; or newer fully supported devices. - * - * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for processes running on - * vGPU instances active on a device. Utilization values are returned as an array of utilization sample structures in the - * caller-supplied buffer pointed at by \a utilizationSamples. One utilization sample structure is returned per process running - * on vGPU instances, that had some non-zero utilization during the last sample period. It includes the CPU timestamp at which - * the samples were recorded. Individual utilization values are returned as "unsigned int" values. - * - * To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with - * \a utilizationSamples set to NULL. The function will return NVML_ERROR_INSUFFICIENT_SIZE, with the current vGPU instance - * count in \a vgpuProcessSamplesCount. The caller should allocate a buffer of size - * vgpuProcessSamplesCount * sizeof(nvmlVgpuProcessUtilizationSample_t). Invoke the function again with - * the allocated buffer passed in \a utilizationSamples, and \a vgpuProcessSamplesCount set to the number of entries the - * buffer is sized for. - * - * On successful return, the function updates \a vgpuSubProcessSampleCount with the number of vGPU sub process utilization sample - * structures that were actually written. This may differ from a previously read value depending on the number of processes that are active - * in any given sample period. - * - * lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0 - * to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp - * to a timeStamp retrieved from a previous query to read utilization since the previous query. 
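A sketch of the sampling loop described above: size the buffer with a NULL sample pointer, read the samples, and carry the newest timestamp into the next query. Added for illustration; the `timeStamp` member name is assumed from the nvmlVgpuInstanceUtilizationSample_t definition earlier in this header.

#include <stdio.h>
#include <stdlib.h>
#include <nvml.h>

/* Read per-vGPU utilization since `lastSeenTimeStamp` (0 = all buffered samples). */
static unsigned long long sample_vgpu_utilization(nvmlDevice_t device,
                                                  unsigned long long lastSeenTimeStamp)
{
    nvmlValueType_t valType;
    unsigned int count = 0;
    nvmlReturn_t ret = nvmlDeviceGetVgpuUtilization(device, lastSeenTimeStamp,
                                                    &valType, &count, NULL);
    if (ret != NVML_ERROR_INSUFFICIENT_SIZE || count == 0)
        return lastSeenTimeStamp;                 /* nothing to read */

    nvmlVgpuInstanceUtilizationSample_t *samples = malloc(count * sizeof *samples);
    if (samples && nvmlDeviceGetVgpuUtilization(device, lastSeenTimeStamp,
                                                &valType, &count, samples) == NVML_SUCCESS) {
        for (unsigned int i = 0; i < count; ++i)
            if (samples[i].timeStamp > lastSeenTimeStamp)   /* member name assumed */
                lastSeenTimeStamp = samples[i].timeStamp;
        printf("%u vGPU utilization samples read\n", count);
    }
    free(samples);
    return lastSeenTimeStamp;                     /* feed into the next call */
}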
- * - * @param device The identifier for the target device - * @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp. - * @param vgpuProcessSamplesCount Pointer to caller-supplied array size, and returns number of processes running on vGPU instances - * @param utilizationSamples Pointer to caller-supplied buffer in which vGPU sub process utilization samples are returned - - * @return - * - \ref NVML_SUCCESS if utilization samples are successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a vgpuProcessSamplesCount or a sample count of 0 is - * passed with a non-NULL \a utilizationSamples - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if supplied \a vgpuProcessSamplesCount is too small to return samples for all - * vGPU instances currently executing on the device - * - \ref NVML_ERROR_NOT_SUPPORTED if vGPU is not supported by the device - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_NOT_FOUND if sample entries are not found - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetVgpuProcessUtilization(nvmlDevice_t device, unsigned long long lastSeenTimeStamp, - unsigned int *vgpuProcessSamplesCount, - nvmlVgpuProcessUtilizationSample_t *utilizationSamples); -/** - * Retrieve the GRID licensable features. - * - * Identifies whether the system supports GRID Software Licensing. If it does, return the list of licensable feature(s) - * and their current license status. - * - * @param device Identifier of the target device - * @param pGridLicensableFeatures Pointer to structure in which GRID licensable features are returned - * - * @return - * - \ref NVML_SUCCESS if licensable features are successfully retrieved - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pGridLicensableFeatures is NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetGridLicensableFeatures(nvmlDevice_t device, nvmlGridLicensableFeatures_t *pGridLicensableFeatures); - -/** - * Retrieves the current encoder statistics of a vGPU Instance - * - * For Maxwell &tm; or newer fully supported devices. - * - * @param vgpuInstance Identifier of the target vGPU instance - * @param sessionCount Reference to an unsigned int for count of active encoder sessions - * @param averageFps Reference to an unsigned int for trailing average FPS of all active sessions - * @param averageLatency Reference to an unsigned int for encode latency in microseconds - * - * @return - * - \ref NVML_SUCCESS if \a sessionCount, \a averageFps and \a averageLatency is fetched - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount , or \a averageFps or \a averageLatency is NULL - * or \a vgpuInstance is 0. - * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetEncoderStats(nvmlVgpuInstance_t vgpuInstance, unsigned int *sessionCount, - unsigned int *averageFps, unsigned int *averageLatency); - -/** - * Retrieves information about all active encoder sessions on a vGPU Instance. - * - * An array of active encoder sessions is returned in the caller-supplied buffer pointed at by \a sessionInfo. 
The - * array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions - * written to the buffer. - * - * If the supplied buffer is not large enough to accommodate the active session array, the function returns - * NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlEncoderSessionInfo_t array required in \a sessionCount. - * To query the number of active encoder sessions, call this function with *sessionCount = 0. The code will return - * NVML_SUCCESS with number of active encoder sessions updated in *sessionCount. - * - * For Maxwell &tm; or newer fully supported devices. - * - * @param vgpuInstance Identifier of the target vGPU instance - * @param sessionCount Reference to caller supplied array size, and returns - * the number of sessions. - * @param sessionInfo Reference to caller supplied array in which the list - * of session information us returned. - * - * @return - * - \ref NVML_SUCCESS if \a sessionInfo is fetched - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is - returned in \a sessionCount - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount is NULL, or \a vgpuInstance is 0. - * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetEncoderSessions(nvmlVgpuInstance_t vgpuInstance, unsigned int *sessionCount, nvmlEncoderSessionInfo_t *sessionInfo); - -/** -* Retrieves the active frame buffer capture sessions statistics of a vGPU Instance -* -* For Maxwell &tm; or newer fully supported devices. -* -* @param vgpuInstance Identifier of the target vGPU instance -* @param fbcStats Reference to nvmlFBCStats_t structure containing NvFBC stats -* -* @return -* - \ref NVML_SUCCESS if \a fbcStats is fetched -* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized -* - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a fbcStats is NULL -* - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system -* - \ref NVML_ERROR_UNKNOWN on any unexpected error -*/ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetFBCStats(nvmlVgpuInstance_t vgpuInstance, nvmlFBCStats_t *fbcStats); - -/** -* Retrieves information about active frame buffer capture sessions on a vGPU Instance. -* -* An array of active encoder sessions is returned in the caller-supplied buffer pointed at by \a sessionInfo. The -* array element count is passed in \a sessionCount, and \a sessionCount is used to return the number of sessions -* written to the buffer. -* -* If the supplied buffer is not large enough to accommodate the active session array, the function returns -* NVML_ERROR_INSUFFICIENT_SIZE, with the element count of nvmlFBCSessionInfo_t array required in \a sessionCount. -* To query the number of active FBC sessions, call this function with *sessionCount = 0. The code will return -* NVML_SUCCESS with number of active FBC sessions updated in *sessionCount. -* -* For Maxwell &tm; or newer fully supported devices. -* -* @note hResolution, vResolution, averageFPS and averageLatency data for a FBC session returned in \a sessionInfo may -* be zero if there are no new frames captured since the session started. 
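A minimal sketch of the aggregate encoder statistics query documented above; illustrative only, not part of the original header. All parameters come straight from the nvmlVgpuInstanceGetEncoderStats signature, and `vgpuInstance` is assumed to be an active instance.

#include <stdio.h>
#include <nvml.h>

/* Report aggregate encoder activity for one vGPU instance. */
static void report_encoder_stats(nvmlVgpuInstance_t vgpuInstance)
{
    unsigned int sessionCount = 0, averageFps = 0, averageLatency = 0;
    if (nvmlVgpuInstanceGetEncoderStats(vgpuInstance, &sessionCount,
                                        &averageFps, &averageLatency) == NVML_SUCCESS)
        printf("%u encoder session(s), avg %u fps, avg latency %u us\n",
               sessionCount, averageFps, averageLatency);
}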
-* -* @param vgpuInstance Identifier of the target vGPU instance -* @param sessionCount Reference to caller supplied array size, and returns the number of sessions. -* @param sessionInfo Reference in which to return the session information -* -* @return -* - \ref NVML_SUCCESS if \a sessionInfo is fetched -* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized -* - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a sessionCount is NULL. -* - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system -* - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is returned in \a sessionCount -* - \ref NVML_ERROR_UNKNOWN on any unexpected error -*/ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetFBCSessions(nvmlVgpuInstance_t vgpuInstance, unsigned int *sessionCount, nvmlFBCSessionInfo_t *sessionInfo); - -/** - * Retrieves the current utilization and process ID - * - * For Maxwell &tm; or newer fully supported devices. - * - * Reads recent utilization of GPU SM (3D/Compute), framebuffer, video encoder, and video decoder for processes running. - * Utilization values are returned as an array of utilization sample structures in the caller-supplied buffer pointed at - * by \a utilization. One utilization sample structure is returned per process running, that had some non-zero utilization - * during the last sample period. It includes the CPU timestamp at which the samples were recorded. Individual utilization values - * are returned as "unsigned int" values. - * - * To read utilization values, first determine the size of buffer required to hold the samples by invoking the function with - * \a utilization set to NULL. The caller should allocate a buffer of size - * processSamplesCount * sizeof(nvmlProcessUtilizationSample_t). Invoke the function again with the allocated buffer passed - * in \a utilization, and \a processSamplesCount set to the number of entries the buffer is sized for. - * - * On successful return, the function updates \a processSamplesCount with the number of process utilization sample - * structures that were actually written. This may differ from a previously read value as instances are created or - * destroyed. - * - * lastSeenTimeStamp represents the CPU timestamp in microseconds at which utilization samples were last read. Set it to 0 - * to read utilization based on all the samples maintained by the driver's internal sample buffer. Set lastSeenTimeStamp - * to a timeStamp retrieved from a previous query to read utilization since the previous query. - * - * @param device The identifier of the target device - * @param utilization Pointer to caller-supplied buffer in which guest process utilization samples are returned - * @param processSamplesCount Pointer to caller-supplied array size, and returns number of processes running - * @param lastSeenTimeStamp Return only samples with timestamp greater than lastSeenTimeStamp. 
- - * @return - * - \ref NVML_SUCCESS if \a utilization has been populated - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL - * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature - * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetProcessUtilization(nvmlDevice_t device, nvmlProcessUtilizationSample_t *utilization, - unsigned int *processSamplesCount, unsigned long long lastSeenTimeStamp); - -/** - * Queries the state of per process accounting mode on vGPU. - * - * For Maxwell &tm; or newer fully supported devices. - * - * @param vgpuInstance The identifier of the target vGPU VM - * @param mode Reference in which to return the current accounting mode - * - * @return - * - \ref NVML_SUCCESS if the mode has been successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a mode is NULL - * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system - * - \ref NVML_ERROR_NOT_SUPPORTED if the vGPU doesn't support this feature - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetAccountingMode(nvmlVgpuInstance_t vgpuInstance, nvmlEnableState_t *mode); - -/** - * Queries list of processes running on vGPU that can be queried for accounting stats. The list of processes - * returned can be in running or terminated state. - * - * For Maxwell &tm; or newer fully supported devices. - * - * To just query the maximum number of processes that can be queried, call this function with *count = 0 and - * pids=NULL. The return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if list is empty. - * - * For more details see \ref nvmlVgpuInstanceGetAccountingStats. - * - * @note In case of PID collision some processes might not be accessible before the circular buffer is full. - * - * @param vgpuInstance The identifier of the target vGPU VM - * @param count Reference in which to provide the \a pids array size, and - * to return the number of elements ready to be queried - * @param pids Reference in which to return list of process ids - * - * @return - * - \ref NVML_SUCCESS if pids were successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a count is NULL - * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system - * - \ref NVML_ERROR_NOT_SUPPORTED if the vGPU doesn't support this feature or accounting mode is disabled - * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a count is too small (\a count is set to expected value) - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - * - * @see nvmlVgpuInstanceGetAccountingPids - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetAccountingPids(nvmlVgpuInstance_t vgpuInstance, unsigned int *count, unsigned int *pids); - -/** - * Queries process's accounting stats. - * - * For Maxwell &tm; or newer fully supported devices. 
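A hedged sketch of the per-process utilization query described above; not part of the original header. The comment above says to size the buffer by first calling with a NULL sample pointer, and this sketch assumes that sizing call reports the required count in *processSamplesCount even though its return value for that case is not spelled out.

#include <stdio.h>
#include <stdlib.h>
#include <nvml.h>

/* Read per-process utilization samples accumulated since `lastSeenTimeStamp`. */
static void report_process_utilization(nvmlDevice_t device,
                                       unsigned long long lastSeenTimeStamp)
{
    unsigned int count = 0;
    /* Sizing pass with a NULL buffer is assumed to report the required sample count. */
    nvmlDeviceGetProcessUtilization(device, NULL, &count, lastSeenTimeStamp);
    if (count == 0)
        return;

    nvmlProcessUtilizationSample_t *samples = malloc(count * sizeof *samples);
    if (samples && nvmlDeviceGetProcessUtilization(device, samples, &count,
                                                   lastSeenTimeStamp) == NVML_SUCCESS)
        printf("%u process utilization sample(s) read\n", count);
    free(samples);
}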
- * - * Accounting stats capture GPU utilization and other statistics across the lifetime of a process, and - * can be queried during life time of the process or after its termination. - * The time field in \ref nvmlAccountingStats_t is reported as 0 during the lifetime of the process and - * updated to actual running time after its termination. - * Accounting stats are kept in a circular buffer, newly created processes overwrite information about old - * processes. - * - * See \ref nvmlAccountingStats_t for description of each returned metric. - * List of processes that can be queried can be retrieved from \ref nvmlVgpuInstanceGetAccountingPids. - * - * @note Accounting Mode needs to be on. See \ref nvmlVgpuInstanceGetAccountingMode. - * @note Only compute and graphics applications stats can be queried. Monitoring applications stats can't be - * queried since they don't contribute to GPU utilization. - * @note In case of pid collision stats of only the latest process (that terminated last) will be reported - * - * @param vgpuInstance The identifier of the target vGPU VM - * @param pid Process Id of the target process to query stats for - * @param stats Reference in which to return the process's accounting stats - * - * @return - * - \ref NVML_SUCCESS if stats have been successfully retrieved - * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a stats is NULL - * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system - * or \a stats is not found - * - \ref NVML_ERROR_NOT_SUPPORTED if the vGPU doesn't support this feature or accounting mode is disabled - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetAccountingStats(nvmlVgpuInstance_t vgpuInstance, unsigned int pid, nvmlAccountingStats_t *stats); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvml vGPU Migration - * This chapter describes NVML operations that are associated with vGPU Migration. - * @{ - */ -/***************************************************************************************************/ - -/** - * vGPU metadata structure. 
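A sketch of the accounting workflow documented above: size the PID list with *count set to 0 and a NULL array, fetch the PIDs, then query each process's stats. Added for illustration; accounting mode is assumed to be enabled, and the `gpuUtilization` member name is assumed from the nvmlAccountingStats_t definition elsewhere in this header.

#include <stdio.h>
#include <stdlib.h>
#include <nvml.h>

/* Walk the accountable processes of a vGPU instance and fetch their stats. */
static void report_accounting(nvmlVgpuInstance_t vgpuInstance)
{
    unsigned int count = 0;
    /* Sizing call: *count == 0 and pids == NULL returns the required array size. */
    if (nvmlVgpuInstanceGetAccountingPids(vgpuInstance, &count, NULL)
            != NVML_ERROR_INSUFFICIENT_SIZE || count == 0)
        return;

    unsigned int *pids = malloc(count * sizeof *pids);
    if (pids && nvmlVgpuInstanceGetAccountingPids(vgpuInstance, &count, pids) == NVML_SUCCESS) {
        for (unsigned int i = 0; i < count; ++i) {
            nvmlAccountingStats_t stats;
            if (nvmlVgpuInstanceGetAccountingStats(vgpuInstance, pids[i], &stats) == NVML_SUCCESS)
                printf("pid %u: gpu %u%%\n", pids[i],
                       stats.gpuUtilization);     /* member name assumed */
        }
    }
    free(pids);
}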
- */ -typedef struct nvmlVgpuMetadata_st -{ - unsigned int version; //!< Current version of the structure - unsigned int revision; //!< Current revision of the structure - nvmlVgpuGuestInfoState_t guestInfoState; //!< Current state of Guest-dependent fields - char guestDriverVersion[NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE]; //!< Version of driver installed in guest - char hostDriverVersion[NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE]; //!< Version of driver installed in host - unsigned int reserved[8]; //!< Reserved for internal use - unsigned int opaqueDataSize; //!< Size of opaque data field in bytes - char opaqueData[4]; //!< Opaque data -} nvmlVgpuMetadata_t; - -/** - * Physical GPU metadata structure - */ -typedef struct nvmlVgpuPgpuMetadata_st -{ - unsigned int version; //!< Current version of the structure - unsigned int revision; //!< Current revision of the structure - char hostDriverVersion[NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE]; //!< Host driver version - unsigned int pgpuVirtualizationCaps; //!< Pgpu virtualization capabilities bitfield - unsigned int reserved[7]; //!< Reserved for internal use - unsigned int opaqueDataSize; //!< Size of opaque data field in bytes - char opaqueData[4]; //!< Opaque data -} nvmlVgpuPgpuMetadata_t; - -/** - * vGPU VM compatibility codes - */ -typedef enum nvmlVgpuVmCompatibility_enum -{ - NVML_VGPU_VM_COMPATIBILITY_NONE = 0x0, //!< vGPU is not runnable - NVML_VGPU_VM_COMPATIBILITY_COLD = 0x1, //!< vGPU is runnable from a cold / powered-off state (ACPI S5) - NVML_VGPU_VM_COMPATIBILITY_HIBERNATE = 0x2, //!< vGPU is runnable from a hibernated state (ACPI S4) - NVML_VGPU_VM_COMPATIBILITY_SLEEP = 0x4, //!< vGPU is runnable from a sleeped state (ACPI S3) - NVML_VGPU_VM_COMPATIBILITY_LIVE = 0x8, //!< vGPU is runnable from a live/paused (ACPI S0) -} nvmlVgpuVmCompatibility_t; - -/** - * vGPU-pGPU compatibility limit codes - */ -typedef enum nvmlVgpuPgpuCompatibilityLimitCode_enum -{ - NVML_VGPU_COMPATIBILITY_LIMIT_NONE = 0x0, //!< Compatibility is not limited. - NVML_VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER = 0x1, //!< Compatibility is limited by host driver version. - NVML_VGPU_COMPATIBILITY_LIMIT_GUEST_DRIVER = 0x2, //!< Compatibility is limited by guest driver version. - NVML_VGPU_COMPATIBILITY_LIMIT_GPU = 0x4, //!< Compatibility is limited by GPU hardware. - NVML_VGPU_COMPATIBILITY_LIMIT_OTHER = 0x80000000, //!< Compatibility is limited by an undefined factor. -} nvmlVgpuPgpuCompatibilityLimitCode_t; - -/** - * vGPU-pGPU compatibility structure - */ -typedef struct nvmlVgpuPgpuCompatibility_st -{ - nvmlVgpuVmCompatibility_t vgpuVmCompatibility; //!< Compatibility of vGPU VM. See \ref nvmlVgpuVmCompatibility_t - nvmlVgpuPgpuCompatibilityLimitCode_t compatibilityLimitCode; //!< Limiting factor for vGPU-pGPU compatibility. See \ref nvmlVgpuPgpuCompatibilityLimitCode_t -} nvmlVgpuPgpuCompatibility_t; - -/** - * Returns vGPU metadata structure for a running vGPU. The structure contains information about the vGPU and its associated VM - * such as the currently installed NVIDIA guest driver version, together with host driver version and an opaque data section - * containing internal state. - * - * nvmlVgpuInstanceGetMetadata() may be called at any time for a vGPU instance. Some fields in the returned structure are - * dependent on information obtained from the guest VM, which may not yet have reached a state where that information - * is available. The current state of these dependent fields is reflected in the info structure's \ref guestInfoState field. 
- * - * The VMM may choose to read and save the vGPU's VM info as persistent metadata associated with the VM, and provide - * it to GRID Virtual GPU Manager when creating a vGPU for subsequent instances of the VM. - * - * The caller passes in a buffer via \a vgpuMetadata, with the size of the buffer in \a bufferSize. If the vGPU Metadata structure - * is too large to fit in the supplied buffer, the function returns NVML_ERROR_INSUFFICIENT_SIZE with the size needed - * in \a bufferSize. - * - * @param vgpuInstance vGPU instance handle - * @param vgpuMetadata Pointer to caller-supplied buffer into which vGPU metadata is written - * @param bufferSize Size of vgpuMetadata buffer - * - * @return - * - \ref NVML_SUCCESS vGPU metadata structure was successfully returned - * - \ref NVML_ERROR_INSUFFICIENT_SIZE vgpuMetadata buffer is too small, required size is returned in \a bufferSize - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a bufferSize is NULL or \a vgpuInstance is 0; if \a vgpuMetadata is NULL and the value of \a bufferSize is not 0. - * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlVgpuInstanceGetMetadata(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuMetadata_t *vgpuMetadata, unsigned int *bufferSize); - -/** - * Returns a vGPU metadata structure for the physical GPU indicated by \a device. The structure contains information about - * the GPU and the currently installed NVIDIA host driver version that's controlling it, together with an opaque data section - * containing internal state. - * - * The caller passes in a buffer via \a pgpuMetadata, with the size of the buffer in \a bufferSize. If the \a pgpuMetadata - * structure is too large to fit in the supplied buffer, the function returns NVML_ERROR_INSUFFICIENT_SIZE with the size needed - * in \a bufferSize. - * - * @param device The identifier of the target device - * @param pgpuMetadata Pointer to caller-supplied buffer into which \a pgpuMetadata is written - * @param bufferSize Pointer to size of \a pgpuMetadata buffer - * - * @return - * - \ref NVML_SUCCESS GPU metadata structure was successfully returned - * - \ref NVML_ERROR_INSUFFICIENT_SIZE pgpuMetadata buffer is too small, required size is returned in \a bufferSize - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a bufferSize is NULL or \a device is invalid; if \a pgpuMetadata is NULL and the value of \a bufferSize is not 0. - * - \ref NVML_ERROR_NOT_SUPPORTED vGPU is not supported by the system - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlDeviceGetVgpuMetadata(nvmlDevice_t device, nvmlVgpuPgpuMetadata_t *pgpuMetadata, unsigned int *bufferSize); - -/** - * Takes a vGPU instance metadata structure read from \ref nvmlVgpuInstanceGetMetadata(), and a vGPU metadata structure for a - * physical GPU read from \ref nvmlDeviceGetVgpuMetadata(), and returns compatibility information of the vGPU instance and the - * physical GPU. - * - * The caller passes in a buffer via \a compatibilityInfo, into which a compatibility information structure is written. The - * structure defines the states in which the vGPU / VM may be booted on the physical GPU. If the vGPU / VM compatibility - * with the physical GPU is limited, a limit code indicates the factor limiting compatibility. - * (see \ref nvmlVgpuPgpuCompatibilityLimitCode_t for details). 
- * - * Note: vGPU compatibility does not take into account dynamic capacity conditions that may limit a system's ability to - * boot a given vGPU or associated VM. - * - * @param vgpuMetadata Pointer to caller-supplied vGPU metadata structure - * @param pgpuMetadata Pointer to caller-supplied GPU metadata structure - * @param compatibilityInfo Pointer to caller-supplied buffer to hold compatibility info - * - * @return - * - \ref NVML_SUCCESS vGPU metadata structure was successfully returned - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuMetadata or \a pgpuMetadata or \a bufferSize are NULL - * - \ref NVML_ERROR_UNKNOWN on any unexpected error - */ -nvmlReturn_t DECLDIR nvmlGetVgpuCompatibility(nvmlVgpuMetadata_t *vgpuMetadata, nvmlVgpuPgpuMetadata_t *pgpuMetadata, nvmlVgpuPgpuCompatibility_t *compatibilityInfo); - -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlGpuBlacklistQueries GPU Blacklist Queries - * This chapter describes NVML operations that are associated with blacklisted GPUs. - * @{ - */ -/***************************************************************************************************/ - -/** - * Blacklist GPU device information - **/ -typedef struct nvmlBlacklistDeviceInfo_st -{ - nvmlPciInfo_t pciInfo; //!< The PCI information for the blacklisted GPU - char uuid[NVML_DEVICE_UUID_BUFFER_SIZE]; //!< The ASCII string UUID for the blacklisted GPU -} nvmlBlacklistDeviceInfo_t; - - /** - * Retrieves the number of blacklisted GPU devices in the system. - * - * For all products. - * - * @param deviceCount Reference in which to return the number of blacklisted devices - * - * @return - * - \ref NVML_SUCCESS if \a deviceCount has been set - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a deviceCount is NULL - */ -nvmlReturn_t DECLDIR nvmlGetBlacklistDeviceCount(unsigned int *deviceCount); - -/** - * Acquire the device information for a blacklisted device, based on its index. - * - * For all products. - * - * Valid indices are derived from the \a deviceCount returned by - * \ref nvmlGetBlacklistDeviceCount(). For example, if \a deviceCount is 2 the valid indices - * are 0 and 1, corresponding to GPU 0 and GPU 1. - * - * @param index The index of the target GPU, >= 0 and < \a deviceCount - * @param info Reference in which to return the device information - * - * @return - * - \ref NVML_SUCCESS if \a device has been set - * - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a info is NULL - * - * @see nvmlGetBlacklistDeviceCount - */ -nvmlReturn_t DECLDIR nvmlGetBlacklistDeviceInfoByIndex(unsigned int index, nvmlBlacklistDeviceInfo_t *info); - -/** @} */ - -/** - * NVML API versioning support - */ -#if defined(__NVML_API_VERSION_INTERNAL) -#undef nvmlDeviceRemoveGpu -#undef nvmlDeviceGetNvLinkRemotePciInfo -#undef nvmlDeviceGetPciInfo -#undef nvmlDeviceGetCount -#undef nvmlDeviceGetHandleByIndex -#undef nvmlDeviceGetHandleByPciBusId -#undef nvmlInit -#endif - -#ifdef __cplusplus -} -#endif - -#endif diff --git a/include/triton/external/half.hpp b/include/triton/external/half.hpp deleted file mode 100644 index 4b6c305c147c..000000000000 --- a/include/triton/external/half.hpp +++ /dev/null @@ -1,3067 +0,0 @@ -// half - IEEE 754-based half-precision floating point library. 
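/*
 * Editorial sketch, not part of either deleted header: the vGPU metadata
 * queries removed in nvml.h above (nvmlVgpuInstanceGetMetadata,
 * nvmlDeviceGetVgpuMetadata) all follow NVML's usual two-call buffer
 * protocol documented there -- probe with a zero-sized buffer, get
 * NVML_ERROR_INSUFFICIENT_SIZE together with the required size, allocate,
 * then query again. The vgpuInstance handle is assumed to have been
 * obtained elsewhere, and <stdlib.h> plus the NVML header are assumed to
 * be available.
 */
static nvmlReturn_t query_vgpu_metadata(nvmlVgpuInstance_t vgpuInstance,
                                        nvmlVgpuMetadata_t **out) {
    unsigned int size = 0;
    /* First call with a zero-sized buffer reports the required size. */
    nvmlReturn_t ret = nvmlVgpuInstanceGetMetadata(vgpuInstance, NULL, &size);
    if (ret != NVML_ERROR_INSUFFICIENT_SIZE)
        return ret;
    nvmlVgpuMetadata_t *buf = (nvmlVgpuMetadata_t *)malloc(size);
    if (!buf)
        return NVML_ERROR_UNKNOWN; /* sketch-level error handling */
    /* Second call fills the caller-supplied buffer. */
    ret = nvmlVgpuInstanceGetMetadata(vgpuInstance, buf, &size);
    if (ret != NVML_SUCCESS) {
        free(buf);
        return ret;
    }
    *out = buf;
    return NVML_SUCCESS;
}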
-// -// Copyright (c) 2012-2017 Christian Rau -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation -// files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, -// modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the -// Software is furnished to do so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE -// WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - -// Version 1.12.0 - -/// \file -/// Main header file for half precision functionality. - -#ifndef HALF_HALF_HPP -#define HALF_HALF_HPP - -/// Combined gcc version number. -#define HALF_GNUC_VERSION (__GNUC__*100+__GNUC_MINOR__) - -//check C++11 language features -#if defined(__clang__) //clang - #if __has_feature(cxx_static_assert) && !defined(HALF_ENABLE_CPP11_STATIC_ASSERT) - #define HALF_ENABLE_CPP11_STATIC_ASSERT 1 - #endif - #if __has_feature(cxx_constexpr) && !defined(HALF_ENABLE_CPP11_CONSTEXPR) - #define HALF_ENABLE_CPP11_CONSTEXPR 1 - #endif - #if __has_feature(cxx_noexcept) && !defined(HALF_ENABLE_CPP11_NOEXCEPT) - #define HALF_ENABLE_CPP11_NOEXCEPT 1 - #endif - #if __has_feature(cxx_user_literals) && !defined(HALF_ENABLE_CPP11_USER_LITERALS) - #define HALF_ENABLE_CPP11_USER_LITERALS 1 - #endif - #if (defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L) && !defined(HALF_ENABLE_CPP11_LONG_LONG) - #define HALF_ENABLE_CPP11_LONG_LONG 1 - #endif -/*#elif defined(__INTEL_COMPILER) //Intel C++ - #if __INTEL_COMPILER >= 1100 && !defined(HALF_ENABLE_CPP11_STATIC_ASSERT) ???????? - #define HALF_ENABLE_CPP11_STATIC_ASSERT 1 - #endif - #if __INTEL_COMPILER >= 1300 && !defined(HALF_ENABLE_CPP11_CONSTEXPR) ???????? - #define HALF_ENABLE_CPP11_CONSTEXPR 1 - #endif - #if __INTEL_COMPILER >= 1300 && !defined(HALF_ENABLE_CPP11_NOEXCEPT) ???????? - #define HALF_ENABLE_CPP11_NOEXCEPT 1 - #endif - #if __INTEL_COMPILER >= 1100 && !defined(HALF_ENABLE_CPP11_LONG_LONG) ???????? 
- #define HALF_ENABLE_CPP11_LONG_LONG 1 - #endif*/ -#elif defined(__GNUC__) //gcc - #if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103L - #if HALF_GNUC_VERSION >= 403 && !defined(HALF_ENABLE_CPP11_STATIC_ASSERT) - #define HALF_ENABLE_CPP11_STATIC_ASSERT 1 - #endif - #if HALF_GNUC_VERSION >= 406 && !defined(HALF_ENABLE_CPP11_CONSTEXPR) - #define HALF_ENABLE_CPP11_CONSTEXPR 1 - #endif - #if HALF_GNUC_VERSION >= 406 && !defined(HALF_ENABLE_CPP11_NOEXCEPT) - #define HALF_ENABLE_CPP11_NOEXCEPT 1 - #endif - #if HALF_GNUC_VERSION >= 407 && !defined(HALF_ENABLE_CPP11_USER_LITERALS) - #define HALF_ENABLE_CPP11_USER_LITERALS 1 - #endif - #if !defined(HALF_ENABLE_CPP11_LONG_LONG) - #define HALF_ENABLE_CPP11_LONG_LONG 1 - #endif - #endif -#elif defined(_MSC_VER) //Visual C++ - #if _MSC_VER >= 1900 && !defined(HALF_ENABLE_CPP11_CONSTEXPR) - #define HALF_ENABLE_CPP11_CONSTEXPR 1 - #endif - #if _MSC_VER >= 1900 && !defined(HALF_ENABLE_CPP11_NOEXCEPT) - #define HALF_ENABLE_CPP11_NOEXCEPT 1 - #endif - #if _MSC_VER >= 1900 && !defined(HALF_ENABLE_CPP11_USER_LITERALS) - #define HALF_ENABLE_CPP11_USER_LITERALS 1 - #endif - #if _MSC_VER >= 1600 && !defined(HALF_ENABLE_CPP11_STATIC_ASSERT) - #define HALF_ENABLE_CPP11_STATIC_ASSERT 1 - #endif - #if _MSC_VER >= 1310 && !defined(HALF_ENABLE_CPP11_LONG_LONG) - #define HALF_ENABLE_CPP11_LONG_LONG 1 - #endif - #define HALF_POP_WARNINGS 1 - #pragma warning(push) - #pragma warning(disable : 4099 4127 4146) //struct vs class, constant in if, negative unsigned -#endif - -//check C++11 library features -#include -#if defined(_LIBCPP_VERSION) //libc++ - #if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103 - #ifndef HALF_ENABLE_CPP11_TYPE_TRAITS - #define HALF_ENABLE_CPP11_TYPE_TRAITS 1 - #endif - #ifndef HALF_ENABLE_CPP11_CSTDINT - #define HALF_ENABLE_CPP11_CSTDINT 1 - #endif - #ifndef HALF_ENABLE_CPP11_CMATH - #define HALF_ENABLE_CPP11_CMATH 1 - #endif - #ifndef HALF_ENABLE_CPP11_HASH - #define HALF_ENABLE_CPP11_HASH 1 - #endif - #endif -#elif defined(__GLIBCXX__) //libstdc++ - #if defined(__GXX_EXPERIMENTAL_CXX0X__) || __cplusplus >= 201103 - #ifdef __clang__ - #if __GLIBCXX__ >= 20080606 && !defined(HALF_ENABLE_CPP11_TYPE_TRAITS) - #define HALF_ENABLE_CPP11_TYPE_TRAITS 1 - #endif - #if __GLIBCXX__ >= 20080606 && !defined(HALF_ENABLE_CPP11_CSTDINT) - #define HALF_ENABLE_CPP11_CSTDINT 1 - #endif - #if __GLIBCXX__ >= 20080606 && !defined(HALF_ENABLE_CPP11_CMATH) - #define HALF_ENABLE_CPP11_CMATH 1 - #endif - #if __GLIBCXX__ >= 20080606 && !defined(HALF_ENABLE_CPP11_HASH) - #define HALF_ENABLE_CPP11_HASH 1 - #endif - #else - #if HALF_GNUC_VERSION >= 403 && !defined(HALF_ENABLE_CPP11_CSTDINT) - #define HALF_ENABLE_CPP11_CSTDINT 1 - #endif - #if HALF_GNUC_VERSION >= 403 && !defined(HALF_ENABLE_CPP11_CMATH) - #define HALF_ENABLE_CPP11_CMATH 1 - #endif - #if HALF_GNUC_VERSION >= 403 && !defined(HALF_ENABLE_CPP11_HASH) - #define HALF_ENABLE_CPP11_HASH 1 - #endif - #endif - #endif -#elif defined(_CPPLIB_VER) //Dinkumware/Visual C++ - #if _CPPLIB_VER >= 520 - #ifndef HALF_ENABLE_CPP11_TYPE_TRAITS - #define HALF_ENABLE_CPP11_TYPE_TRAITS 1 - #endif - #ifndef HALF_ENABLE_CPP11_CSTDINT - #define HALF_ENABLE_CPP11_CSTDINT 1 - #endif - #ifndef HALF_ENABLE_CPP11_HASH - #define HALF_ENABLE_CPP11_HASH 1 - #endif - #endif - #if _CPPLIB_VER >= 610 - #ifndef HALF_ENABLE_CPP11_CMATH - #define HALF_ENABLE_CPP11_CMATH 1 - #endif - #endif -#endif -#undef HALF_GNUC_VERSION - -//support constexpr -#if HALF_ENABLE_CPP11_CONSTEXPR - #define HALF_CONSTEXPR constexpr - #define 
HALF_CONSTEXPR_CONST constexpr -#else - #define HALF_CONSTEXPR - #define HALF_CONSTEXPR_CONST const -#endif - -//support noexcept -#if HALF_ENABLE_CPP11_NOEXCEPT - #define HALF_NOEXCEPT noexcept - #define HALF_NOTHROW noexcept -#else - #define HALF_NOEXCEPT - #define HALF_NOTHROW throw() -#endif - -#include -#include -#include -#include -#include -#include -#if HALF_ENABLE_CPP11_TYPE_TRAITS - #include -#endif -#if HALF_ENABLE_CPP11_CSTDINT - #include -#endif -#if HALF_ENABLE_CPP11_HASH - #include -#endif - - -/// Default rounding mode. -/// This specifies the rounding mode used for all conversions between [half](\ref half_float::half)s and `float`s as well as -/// for the half_cast() if not specifying a rounding mode explicitly. It can be redefined (before including half.hpp) to one -/// of the standard rounding modes using their respective constants or the equivalent values of `std::float_round_style`: -/// -/// `std::float_round_style` | value | rounding -/// ---------------------------------|-------|------------------------- -/// `std::round_indeterminate` | -1 | fastest (default) -/// `std::round_toward_zero` | 0 | toward zero -/// `std::round_to_nearest` | 1 | to nearest -/// `std::round_toward_infinity` | 2 | toward positive infinity -/// `std::round_toward_neg_infinity` | 3 | toward negative infinity -/// -/// By default this is set to `-1` (`std::round_indeterminate`), which uses truncation (round toward zero, but with overflows -/// set to infinity) and is the fastest rounding mode possible. It can even be set to `std::numeric_limits::round_style` -/// to synchronize the rounding mode with that of the underlying single-precision implementation. -#ifndef HALF_ROUND_STYLE - #define HALF_ROUND_STYLE -1 // = std::round_indeterminate -#endif - -/// Tie-breaking behaviour for round to nearest. -/// This specifies if ties in round to nearest should be resolved by rounding to the nearest even value. By default this is -/// defined to `0` resulting in the faster but slightly more biased behaviour of rounding away from zero in half-way cases (and -/// thus equal to the round() function), but can be redefined to `1` (before including half.hpp) if more IEEE-conformant -/// behaviour is needed. -#ifndef HALF_ROUND_TIES_TO_EVEN - #define HALF_ROUND_TIES_TO_EVEN 0 // ties away from zero -#endif - -/// Value signaling overflow. -/// In correspondence with `HUGE_VAL[F|L]` from `` this symbol expands to a positive value signaling the overflow of an -/// operation, in particular it just evaluates to positive infinity. -#define HUGE_VALH std::numeric_limits::infinity() - -/// Fast half-precision fma function. -/// This symbol is only defined if the fma() function generally executes as fast as, or faster than, a separate -/// half-precision multiplication followed by an addition. Due to the internal single-precision implementation of all -/// arithmetic operations, this is in fact always the case. -#define FP_FAST_FMAH 1 - -#ifndef FP_ILOGB0 - #define FP_ILOGB0 INT_MIN -#endif -#ifndef FP_ILOGBNAN - #define FP_ILOGBNAN INT_MAX -#endif -#ifndef FP_SUBNORMAL - #define FP_SUBNORMAL 0 -#endif -#ifndef FP_ZERO - #define FP_ZERO 1 -#endif -#ifndef FP_NAN - #define FP_NAN 2 -#endif -#ifndef FP_INFINITE - #define FP_INFINITE 3 -#endif -#ifndef FP_NORMAL - #define FP_NORMAL 4 -#endif - - -/// Main namespace for half precision functionality. -/// This namespace contains all the functionality provided by the library. 
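/// Editorial usage sketch (not part of the deleted header): the rounding
/// macros documented above are meant to be redefined by the client before
/// half.hpp is included, e.g. to obtain IEEE round-to-nearest-even behaviour
/// instead of the default truncating mode:
/// ~~~~{.cpp}
/// #define HALF_ROUND_STYLE 1          // std::round_to_nearest
/// #define HALF_ROUND_TIES_TO_EVEN 1   // break ties toward even
/// #include "half.hpp"
/// half_float::half h(0.1f);           // float->half now rounds to nearest-even
/// ~~~~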
-namespace half_float -{ - class half; - -#if HALF_ENABLE_CPP11_USER_LITERALS - /// Library-defined half-precision literals. - /// Import this namespace to enable half-precision floating point literals: - /// ~~~~{.cpp} - /// using namespace half_float::literal; - /// half_float::half = 4.2_h; - /// ~~~~ - namespace literal - { - half operator""_h(long double); - } -#endif - - /// \internal - /// \brief Implementation details. - namespace detail - { - #if HALF_ENABLE_CPP11_TYPE_TRAITS - /// Conditional type. - template struct conditional : std::conditional {}; - - /// Helper for tag dispatching. - template struct bool_type : std::integral_constant {}; - using std::true_type; - using std::false_type; - - /// Type traits for floating point types. - template struct is_float : std::is_floating_point {}; - #else - /// Conditional type. - template struct conditional { typedef T type; }; - template struct conditional { typedef F type; }; - - /// Helper for tag dispatching. - template struct bool_type {}; - typedef bool_type true_type; - typedef bool_type false_type; - - /// Type traits for floating point types. - template struct is_float : false_type {}; - template struct is_float : is_float {}; - template struct is_float : is_float {}; - template struct is_float : is_float {}; - template<> struct is_float : true_type {}; - template<> struct is_float : true_type {}; - template<> struct is_float : true_type {}; - #endif - - /// Type traits for floating point bits. - template struct bits { typedef unsigned char type; }; - template struct bits : bits {}; - template struct bits : bits {}; - template struct bits : bits {}; - - #if HALF_ENABLE_CPP11_CSTDINT - /// Unsigned integer of (at least) 16 bits width. - typedef std::uint_least16_t uint16; - - /// Unsigned integer of (at least) 32 bits width. - template<> struct bits { typedef std::uint_least32_t type; }; - - /// Unsigned integer of (at least) 64 bits width. - template<> struct bits { typedef std::uint_least64_t type; }; - #else - /// Unsigned integer of (at least) 16 bits width. - typedef unsigned short uint16; - - /// Unsigned integer of (at least) 32 bits width. - template<> struct bits : conditional::digits>=32,unsigned int,unsigned long> {}; - - #if HALF_ENABLE_CPP11_LONG_LONG - /// Unsigned integer of (at least) 64 bits width. - template<> struct bits : conditional::digits>=64,unsigned long,unsigned long long> {}; - #else - /// Unsigned integer of (at least) 64 bits width. - template<> struct bits { typedef unsigned long type; }; - #endif - #endif - - /// Tag type for binary construction. - struct binary_t {}; - - /// Tag for binary construction. - HALF_CONSTEXPR_CONST binary_t binary = binary_t(); - - /// Temporary half-precision expression. - /// This class represents a half-precision expression which just stores a single-precision value internally. - struct expr - { - /// Conversion constructor. - /// \param f single-precision value to convert - explicit HALF_CONSTEXPR expr(float f) HALF_NOEXCEPT : value_(f) {} - - /// Conversion to single-precision. - /// \return single precision value representing expression value - HALF_CONSTEXPR operator float() const HALF_NOEXCEPT { return value_; } - - private: - /// Internal expression value stored in single-precision. - float value_; - }; - - /// SFINAE helper for generic half-precision functions. - /// This class template has to be specialized for each valid combination of argument types to provide a corresponding - /// `type` member equivalent to \a T. 
- /// \tparam T type to return - template struct enable {}; - template struct enable { typedef T type; }; - template struct enable { typedef T type; }; - template struct enable { typedef T type; }; - template struct enable { typedef T type; }; - template struct enable { typedef T type; }; - template struct enable { typedef T type; }; - template struct enable { typedef T type; }; - template struct enable { typedef T type; }; - template struct enable { typedef T type; }; - template struct enable { typedef T type; }; - template struct enable { typedef T type; }; - template struct enable { typedef T type; }; - template struct enable { typedef T type; }; - template struct enable { typedef T type; }; - - /// Return type for specialized generic 2-argument half-precision functions. - /// This class template has to be specialized for each valid combination of argument types to provide a corresponding - /// `type` member denoting the appropriate return type. - /// \tparam T first argument type - /// \tparam U first argument type - template struct result : enable {}; - template<> struct result { typedef half type; }; - - /// \name Classification helpers - /// \{ - - /// Check for infinity. - /// \tparam T argument type (builtin floating point type) - /// \param arg value to query - /// \retval true if infinity - /// \retval false else - template bool builtin_isinf(T arg) - { - #if HALF_ENABLE_CPP11_CMATH - return std::isinf(arg); - #elif defined(_MSC_VER) - return !::_finite(static_cast(arg)) && !::_isnan(static_cast(arg)); - #else - return arg == std::numeric_limits::infinity() || arg == -std::numeric_limits::infinity(); - #endif - } - - /// Check for NaN. - /// \tparam T argument type (builtin floating point type) - /// \param arg value to query - /// \retval true if not a number - /// \retval false else - template bool builtin_isnan(T arg) - { - #if HALF_ENABLE_CPP11_CMATH - return std::isnan(arg); - #elif defined(_MSC_VER) - return ::_isnan(static_cast(arg)) != 0; - #else - return arg != arg; - #endif - } - - /// Check sign. - /// \tparam T argument type (builtin floating point type) - /// \param arg value to query - /// \retval true if signbit set - /// \retval false else - template bool builtin_signbit(T arg) - { - #if HALF_ENABLE_CPP11_CMATH - return std::signbit(arg); - #else - return arg < T() || (arg == T() && T(1)/arg < T()); - #endif - } - - /// \} - /// \name Conversion - /// \{ - - /// Convert IEEE single-precision to half-precision. - /// Credit for this goes to [Jeroen van der Zijp](ftp://ftp.fox-toolkit.org/pub/fasthalffloatconversion.pdf). - /// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding - /// \param value single-precision value - /// \return binary representation of half-precision value - template uint16 float2half_impl(float value, true_type) - { - typedef bits::type uint32; - uint32 bits;// = *reinterpret_cast(&value); //violating strict aliasing! 
- std::memcpy(&bits, &value, sizeof(float)); -/* uint16 hbits = (bits>>16) & 0x8000; - bits &= 0x7FFFFFFF; - int exp = bits >> 23; - if(exp == 255) - return hbits | 0x7C00 | (0x3FF&-static_cast((bits&0x7FFFFF)!=0)); - if(exp > 142) - { - if(R == std::round_toward_infinity) - return hbits | 0x7C00 - (hbits>>15); - if(R == std::round_toward_neg_infinity) - return hbits | 0x7BFF + (hbits>>15); - return hbits | 0x7BFF + (R!=std::round_toward_zero); - } - int g, s; - if(exp > 112) - { - g = (bits>>12) & 1; - s = (bits&0xFFF) != 0; - hbits |= ((exp-112)<<10) | ((bits>>13)&0x3FF); - } - else if(exp > 101) - { - int i = 125 - exp; - bits = (bits&0x7FFFFF) | 0x800000; - g = (bits>>i) & 1; - s = (bits&((1L<> (i+1); - } - else - { - g = 0; - s = bits != 0; - } - if(R == std::round_to_nearest) - #if HALF_ROUND_TIES_TO_EVEN - hbits += g & (s|hbits); - #else - hbits += g; - #endif - else if(R == std::round_toward_infinity) - hbits += ~(hbits>>15) & (s|g); - else if(R == std::round_toward_neg_infinity) - hbits += (hbits>>15) & (g|s); -*/ static const uint16 base_table[512] = { - 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, - 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0004, 0x0008, 0x0010, 0x0020, 0x0040, 0x0080, 0x0100, - 0x0200, 0x0400, 0x0800, 0x0C00, 0x1000, 0x1400, 0x1800, 0x1C00, 0x2000, 0x2400, 0x2800, 0x2C00, 0x3000, 0x3400, 0x3800, 0x3C00, - 0x4000, 0x4400, 0x4800, 0x4C00, 0x5000, 0x5400, 0x5800, 0x5C00, 0x6000, 0x6400, 0x6800, 0x6C00, 0x7000, 0x7400, 0x7800, 0x7C00, - 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, - 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, - 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, - 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, - 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, - 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, - 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, 0x7C00, - 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, - 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, - 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, - 
0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, - 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, - 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, - 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8000, 0x8001, 0x8002, 0x8004, 0x8008, 0x8010, 0x8020, 0x8040, 0x8080, 0x8100, - 0x8200, 0x8400, 0x8800, 0x8C00, 0x9000, 0x9400, 0x9800, 0x9C00, 0xA000, 0xA400, 0xA800, 0xAC00, 0xB000, 0xB400, 0xB800, 0xBC00, - 0xC000, 0xC400, 0xC800, 0xCC00, 0xD000, 0xD400, 0xD800, 0xDC00, 0xE000, 0xE400, 0xE800, 0xEC00, 0xF000, 0xF400, 0xF800, 0xFC00, - 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, - 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, - 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, - 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, - 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, - 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, - 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00, 0xFC00 }; - static const unsigned char shift_table[512] = { - 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 24, 24, 24, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, - 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 13, - 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 24, 24, 24, 24, 23, 22, 21, 20, 19, 18, 17, 16, 15, 14, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, - 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 
24, 24, - 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, - 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 24, 13 }; - uint16 hbits = base_table[bits>>23] + static_cast((bits&0x7FFFFF)>>shift_table[bits>>23]); - if(R == std::round_to_nearest) - hbits += (((bits&0x7FFFFF)>>(shift_table[bits>>23]-1))|(((bits>>23)&0xFF)==102)) & ((hbits&0x7C00)!=0x7C00) - #if HALF_ROUND_TIES_TO_EVEN - & (((((static_cast(1)<<(shift_table[bits>>23]-1))-1)&bits)!=0)|hbits) - #endif - ; - else if(R == std::round_toward_zero) - hbits -= ((hbits&0x7FFF)==0x7C00) & ~shift_table[bits>>23]; - else if(R == std::round_toward_infinity) - hbits += ((((bits&0x7FFFFF&((static_cast(1)<<(shift_table[bits>>23]))-1))!=0)|(((bits>>23)<=102)& - ((bits>>23)!=0)))&(hbits<0x7C00)) - ((hbits==0xFC00)&((bits>>23)!=511)); - else if(R == std::round_toward_neg_infinity) - hbits += ((((bits&0x7FFFFF&((static_cast(1)<<(shift_table[bits>>23]))-1))!=0)|(((bits>>23)<=358)& - ((bits>>23)!=256)))&(hbits<0xFC00)&(hbits>>15)) - ((hbits==0x7C00)&((bits>>23)!=255)); - return hbits; - } - - /// Convert IEEE double-precision to half-precision. - /// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding - /// \param value double-precision value - /// \return binary representation of half-precision value - template uint16 float2half_impl(double value, true_type) - { - typedef bits::type uint32; - typedef bits::type uint64; - uint64 bits;// = *reinterpret_cast(&value); //violating strict aliasing! - std::memcpy(&bits, &value, sizeof(double)); - uint32 hi = bits >> 32, lo = bits & 0xFFFFFFFF; - uint16 hbits = (hi>>16) & 0x8000; - hi &= 0x7FFFFFFF; - int exp = hi >> 20; - if(exp == 2047) - return hbits | 0x7C00 | (0x3FF&-static_cast((bits&0xFFFFFFFFFFFFF)!=0)); - if(exp > 1038) - { - if(R == std::round_toward_infinity) - return hbits | 0x7C00 - (hbits>>15); - if(R == std::round_toward_neg_infinity) - return hbits | 0x7BFF + (hbits>>15); - return hbits | 0x7BFF + (R!=std::round_toward_zero); - } - int g, s = lo != 0; - if(exp > 1008) - { - g = (hi>>9) & 1; - s |= (hi&0x1FF) != 0; - hbits |= ((exp-1008)<<10) | ((hi>>10)&0x3FF); - } - else if(exp > 997) - { - int i = 1018 - exp; - hi = (hi&0xFFFFF) | 0x100000; - g = (hi>>i) & 1; - s |= (hi&((1L<> (i+1); - } - else - { - g = 0; - s |= hi != 0; - } - if(R == std::round_to_nearest) - #if HALF_ROUND_TIES_TO_EVEN - hbits += g & (s|hbits); - #else - hbits += g; - #endif - else if(R == std::round_toward_infinity) - hbits += ~(hbits>>15) & (s|g); - else if(R == std::round_toward_neg_infinity) - hbits += (hbits>>15) & (g|s); - return hbits; - } - - /// Convert non-IEEE floating point to half-precision. - /// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding - /// \tparam T source type (builtin floating point type) - /// \param value floating point value - /// \return binary representation of half-precision value - template uint16 float2half_impl(T value, ...) 
- { - uint16 hbits = static_cast(builtin_signbit(value)) << 15; - if(value == T()) - return hbits; - if(builtin_isnan(value)) - return hbits | 0x7FFF; - if(builtin_isinf(value)) - return hbits | 0x7C00; - int exp; - std::frexp(value, &exp); - if(exp > 16) - { - if(R == std::round_toward_infinity) - return hbits | 0x7C00 - (hbits>>15); - else if(R == std::round_toward_neg_infinity) - return hbits | 0x7BFF + (hbits>>15); - return hbits | 0x7BFF + (R!=std::round_toward_zero); - } - if(exp < -13) - value = std::ldexp(value, 24); - else - { - value = std::ldexp(value, 11-exp); - hbits |= ((exp+13)<<10); - } - T ival, frac = std::modf(value, &ival); - hbits += static_cast(std::abs(static_cast(ival))); - if(R == std::round_to_nearest) - { - frac = std::abs(frac); - #if HALF_ROUND_TIES_TO_EVEN - hbits += (frac>T(0.5)) | ((frac==T(0.5))&hbits); - #else - hbits += frac >= T(0.5); - #endif - } - else if(R == std::round_toward_infinity) - hbits += frac > T(); - else if(R == std::round_toward_neg_infinity) - hbits += frac < T(); - return hbits; - } - - /// Convert floating point to half-precision. - /// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding - /// \tparam T source type (builtin floating point type) - /// \param value floating point value - /// \return binary representation of half-precision value - template uint16 float2half(T value) - { - return float2half_impl(value, bool_type::is_iec559&&sizeof(typename bits::type)==sizeof(T)>()); - } - - /// Convert integer to half-precision floating point. - /// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding - /// \tparam S `true` if value negative, `false` else - /// \tparam T type to convert (builtin integer type) - /// \param value non-negative integral value - /// \return binary representation of half-precision value - template uint16 int2half_impl(T value) - { - #if HALF_ENABLE_CPP11_STATIC_ASSERT && HALF_ENABLE_CPP11_TYPE_TRAITS - static_assert(std::is_integral::value, "int to half conversion only supports builtin integer types"); - #endif - if(S) - value = -value; - uint16 bits = S << 15; - if(value > 0xFFFF) - { - if(R == std::round_toward_infinity) - bits |= 0x7C00 - S; - else if(R == std::round_toward_neg_infinity) - bits |= 0x7BFF + S; - else - bits |= 0x7BFF + (R!=std::round_toward_zero); - } - else if(value) - { - unsigned int m = value, exp = 24; - for(; m<0x400; m<<=1,--exp) ; - for(; m>0x7FF; m>>=1,++exp) ; - bits |= (exp<<10) + m; - if(exp > 24) - { - if(R == std::round_to_nearest) - bits += (value>>(exp-25)) & 1 - #if HALF_ROUND_TIES_TO_EVEN - & (((((1<<(exp-25))-1)&value)!=0)|bits) - #endif - ; - else if(R == std::round_toward_infinity) - bits += ((value&((1<<(exp-24))-1))!=0) & !S; - else if(R == std::round_toward_neg_infinity) - bits += ((value&((1<<(exp-24))-1))!=0) & S; - } - } - return bits; - } - - /// Convert integer to half-precision floating point. - /// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding - /// \tparam T type to convert (builtin integer type) - /// \param value integral value - /// \return binary representation of half-precision value - template uint16 int2half(T value) - { - return (value<0) ? int2half_impl(value) : int2half_impl(value); - } - - /// Convert half-precision to IEEE single-precision. - /// Credit for this goes to [Jeroen van der Zijp](ftp://ftp.fox-toolkit.org/pub/fasthalffloatconversion.pdf). 
- /// \param value binary representation of half-precision value - /// \return single-precision value - inline float half2float_impl(uint16 value, float, true_type) - { - typedef bits::type uint32; -/* uint32 bits = static_cast(value&0x8000) << 16; - int abs = value & 0x7FFF; - if(abs) - { - bits |= 0x38000000 << static_cast(abs>=0x7C00); - for(; abs<0x400; abs<<=1,bits-=0x800000) ; - bits += static_cast(abs) << 13; - } -*/ static const uint32 mantissa_table[2048] = { - 0x00000000, 0x33800000, 0x34000000, 0x34400000, 0x34800000, 0x34A00000, 0x34C00000, 0x34E00000, 0x35000000, 0x35100000, 0x35200000, 0x35300000, 0x35400000, 0x35500000, 0x35600000, 0x35700000, - 0x35800000, 0x35880000, 0x35900000, 0x35980000, 0x35A00000, 0x35A80000, 0x35B00000, 0x35B80000, 0x35C00000, 0x35C80000, 0x35D00000, 0x35D80000, 0x35E00000, 0x35E80000, 0x35F00000, 0x35F80000, - 0x36000000, 0x36040000, 0x36080000, 0x360C0000, 0x36100000, 0x36140000, 0x36180000, 0x361C0000, 0x36200000, 0x36240000, 0x36280000, 0x362C0000, 0x36300000, 0x36340000, 0x36380000, 0x363C0000, - 0x36400000, 0x36440000, 0x36480000, 0x364C0000, 0x36500000, 0x36540000, 0x36580000, 0x365C0000, 0x36600000, 0x36640000, 0x36680000, 0x366C0000, 0x36700000, 0x36740000, 0x36780000, 0x367C0000, - 0x36800000, 0x36820000, 0x36840000, 0x36860000, 0x36880000, 0x368A0000, 0x368C0000, 0x368E0000, 0x36900000, 0x36920000, 0x36940000, 0x36960000, 0x36980000, 0x369A0000, 0x369C0000, 0x369E0000, - 0x36A00000, 0x36A20000, 0x36A40000, 0x36A60000, 0x36A80000, 0x36AA0000, 0x36AC0000, 0x36AE0000, 0x36B00000, 0x36B20000, 0x36B40000, 0x36B60000, 0x36B80000, 0x36BA0000, 0x36BC0000, 0x36BE0000, - 0x36C00000, 0x36C20000, 0x36C40000, 0x36C60000, 0x36C80000, 0x36CA0000, 0x36CC0000, 0x36CE0000, 0x36D00000, 0x36D20000, 0x36D40000, 0x36D60000, 0x36D80000, 0x36DA0000, 0x36DC0000, 0x36DE0000, - 0x36E00000, 0x36E20000, 0x36E40000, 0x36E60000, 0x36E80000, 0x36EA0000, 0x36EC0000, 0x36EE0000, 0x36F00000, 0x36F20000, 0x36F40000, 0x36F60000, 0x36F80000, 0x36FA0000, 0x36FC0000, 0x36FE0000, - 0x37000000, 0x37010000, 0x37020000, 0x37030000, 0x37040000, 0x37050000, 0x37060000, 0x37070000, 0x37080000, 0x37090000, 0x370A0000, 0x370B0000, 0x370C0000, 0x370D0000, 0x370E0000, 0x370F0000, - 0x37100000, 0x37110000, 0x37120000, 0x37130000, 0x37140000, 0x37150000, 0x37160000, 0x37170000, 0x37180000, 0x37190000, 0x371A0000, 0x371B0000, 0x371C0000, 0x371D0000, 0x371E0000, 0x371F0000, - 0x37200000, 0x37210000, 0x37220000, 0x37230000, 0x37240000, 0x37250000, 0x37260000, 0x37270000, 0x37280000, 0x37290000, 0x372A0000, 0x372B0000, 0x372C0000, 0x372D0000, 0x372E0000, 0x372F0000, - 0x37300000, 0x37310000, 0x37320000, 0x37330000, 0x37340000, 0x37350000, 0x37360000, 0x37370000, 0x37380000, 0x37390000, 0x373A0000, 0x373B0000, 0x373C0000, 0x373D0000, 0x373E0000, 0x373F0000, - 0x37400000, 0x37410000, 0x37420000, 0x37430000, 0x37440000, 0x37450000, 0x37460000, 0x37470000, 0x37480000, 0x37490000, 0x374A0000, 0x374B0000, 0x374C0000, 0x374D0000, 0x374E0000, 0x374F0000, - 0x37500000, 0x37510000, 0x37520000, 0x37530000, 0x37540000, 0x37550000, 0x37560000, 0x37570000, 0x37580000, 0x37590000, 0x375A0000, 0x375B0000, 0x375C0000, 0x375D0000, 0x375E0000, 0x375F0000, - 0x37600000, 0x37610000, 0x37620000, 0x37630000, 0x37640000, 0x37650000, 0x37660000, 0x37670000, 0x37680000, 0x37690000, 0x376A0000, 0x376B0000, 0x376C0000, 0x376D0000, 0x376E0000, 0x376F0000, - 0x37700000, 0x37710000, 0x37720000, 0x37730000, 0x37740000, 0x37750000, 0x37760000, 0x37770000, 0x37780000, 0x37790000, 0x377A0000, 0x377B0000, 0x377C0000, 0x377D0000, 
0x377E0000, 0x377F0000, - 0x37800000, 0x37808000, 0x37810000, 0x37818000, 0x37820000, 0x37828000, 0x37830000, 0x37838000, 0x37840000, 0x37848000, 0x37850000, 0x37858000, 0x37860000, 0x37868000, 0x37870000, 0x37878000, - 0x37880000, 0x37888000, 0x37890000, 0x37898000, 0x378A0000, 0x378A8000, 0x378B0000, 0x378B8000, 0x378C0000, 0x378C8000, 0x378D0000, 0x378D8000, 0x378E0000, 0x378E8000, 0x378F0000, 0x378F8000, - 0x37900000, 0x37908000, 0x37910000, 0x37918000, 0x37920000, 0x37928000, 0x37930000, 0x37938000, 0x37940000, 0x37948000, 0x37950000, 0x37958000, 0x37960000, 0x37968000, 0x37970000, 0x37978000, - 0x37980000, 0x37988000, 0x37990000, 0x37998000, 0x379A0000, 0x379A8000, 0x379B0000, 0x379B8000, 0x379C0000, 0x379C8000, 0x379D0000, 0x379D8000, 0x379E0000, 0x379E8000, 0x379F0000, 0x379F8000, - 0x37A00000, 0x37A08000, 0x37A10000, 0x37A18000, 0x37A20000, 0x37A28000, 0x37A30000, 0x37A38000, 0x37A40000, 0x37A48000, 0x37A50000, 0x37A58000, 0x37A60000, 0x37A68000, 0x37A70000, 0x37A78000, - 0x37A80000, 0x37A88000, 0x37A90000, 0x37A98000, 0x37AA0000, 0x37AA8000, 0x37AB0000, 0x37AB8000, 0x37AC0000, 0x37AC8000, 0x37AD0000, 0x37AD8000, 0x37AE0000, 0x37AE8000, 0x37AF0000, 0x37AF8000, - 0x37B00000, 0x37B08000, 0x37B10000, 0x37B18000, 0x37B20000, 0x37B28000, 0x37B30000, 0x37B38000, 0x37B40000, 0x37B48000, 0x37B50000, 0x37B58000, 0x37B60000, 0x37B68000, 0x37B70000, 0x37B78000, - 0x37B80000, 0x37B88000, 0x37B90000, 0x37B98000, 0x37BA0000, 0x37BA8000, 0x37BB0000, 0x37BB8000, 0x37BC0000, 0x37BC8000, 0x37BD0000, 0x37BD8000, 0x37BE0000, 0x37BE8000, 0x37BF0000, 0x37BF8000, - 0x37C00000, 0x37C08000, 0x37C10000, 0x37C18000, 0x37C20000, 0x37C28000, 0x37C30000, 0x37C38000, 0x37C40000, 0x37C48000, 0x37C50000, 0x37C58000, 0x37C60000, 0x37C68000, 0x37C70000, 0x37C78000, - 0x37C80000, 0x37C88000, 0x37C90000, 0x37C98000, 0x37CA0000, 0x37CA8000, 0x37CB0000, 0x37CB8000, 0x37CC0000, 0x37CC8000, 0x37CD0000, 0x37CD8000, 0x37CE0000, 0x37CE8000, 0x37CF0000, 0x37CF8000, - 0x37D00000, 0x37D08000, 0x37D10000, 0x37D18000, 0x37D20000, 0x37D28000, 0x37D30000, 0x37D38000, 0x37D40000, 0x37D48000, 0x37D50000, 0x37D58000, 0x37D60000, 0x37D68000, 0x37D70000, 0x37D78000, - 0x37D80000, 0x37D88000, 0x37D90000, 0x37D98000, 0x37DA0000, 0x37DA8000, 0x37DB0000, 0x37DB8000, 0x37DC0000, 0x37DC8000, 0x37DD0000, 0x37DD8000, 0x37DE0000, 0x37DE8000, 0x37DF0000, 0x37DF8000, - 0x37E00000, 0x37E08000, 0x37E10000, 0x37E18000, 0x37E20000, 0x37E28000, 0x37E30000, 0x37E38000, 0x37E40000, 0x37E48000, 0x37E50000, 0x37E58000, 0x37E60000, 0x37E68000, 0x37E70000, 0x37E78000, - 0x37E80000, 0x37E88000, 0x37E90000, 0x37E98000, 0x37EA0000, 0x37EA8000, 0x37EB0000, 0x37EB8000, 0x37EC0000, 0x37EC8000, 0x37ED0000, 0x37ED8000, 0x37EE0000, 0x37EE8000, 0x37EF0000, 0x37EF8000, - 0x37F00000, 0x37F08000, 0x37F10000, 0x37F18000, 0x37F20000, 0x37F28000, 0x37F30000, 0x37F38000, 0x37F40000, 0x37F48000, 0x37F50000, 0x37F58000, 0x37F60000, 0x37F68000, 0x37F70000, 0x37F78000, - 0x37F80000, 0x37F88000, 0x37F90000, 0x37F98000, 0x37FA0000, 0x37FA8000, 0x37FB0000, 0x37FB8000, 0x37FC0000, 0x37FC8000, 0x37FD0000, 0x37FD8000, 0x37FE0000, 0x37FE8000, 0x37FF0000, 0x37FF8000, - 0x38000000, 0x38004000, 0x38008000, 0x3800C000, 0x38010000, 0x38014000, 0x38018000, 0x3801C000, 0x38020000, 0x38024000, 0x38028000, 0x3802C000, 0x38030000, 0x38034000, 0x38038000, 0x3803C000, - 0x38040000, 0x38044000, 0x38048000, 0x3804C000, 0x38050000, 0x38054000, 0x38058000, 0x3805C000, 0x38060000, 0x38064000, 0x38068000, 0x3806C000, 0x38070000, 0x38074000, 0x38078000, 0x3807C000, - 0x38080000, 0x38084000, 0x38088000, 
0x3808C000, 0x38090000, 0x38094000, 0x38098000, 0x3809C000, 0x380A0000, 0x380A4000, 0x380A8000, 0x380AC000, 0x380B0000, 0x380B4000, 0x380B8000, 0x380BC000, - 0x380C0000, 0x380C4000, 0x380C8000, 0x380CC000, 0x380D0000, 0x380D4000, 0x380D8000, 0x380DC000, 0x380E0000, 0x380E4000, 0x380E8000, 0x380EC000, 0x380F0000, 0x380F4000, 0x380F8000, 0x380FC000, - 0x38100000, 0x38104000, 0x38108000, 0x3810C000, 0x38110000, 0x38114000, 0x38118000, 0x3811C000, 0x38120000, 0x38124000, 0x38128000, 0x3812C000, 0x38130000, 0x38134000, 0x38138000, 0x3813C000, - 0x38140000, 0x38144000, 0x38148000, 0x3814C000, 0x38150000, 0x38154000, 0x38158000, 0x3815C000, 0x38160000, 0x38164000, 0x38168000, 0x3816C000, 0x38170000, 0x38174000, 0x38178000, 0x3817C000, - 0x38180000, 0x38184000, 0x38188000, 0x3818C000, 0x38190000, 0x38194000, 0x38198000, 0x3819C000, 0x381A0000, 0x381A4000, 0x381A8000, 0x381AC000, 0x381B0000, 0x381B4000, 0x381B8000, 0x381BC000, - 0x381C0000, 0x381C4000, 0x381C8000, 0x381CC000, 0x381D0000, 0x381D4000, 0x381D8000, 0x381DC000, 0x381E0000, 0x381E4000, 0x381E8000, 0x381EC000, 0x381F0000, 0x381F4000, 0x381F8000, 0x381FC000, - 0x38200000, 0x38204000, 0x38208000, 0x3820C000, 0x38210000, 0x38214000, 0x38218000, 0x3821C000, 0x38220000, 0x38224000, 0x38228000, 0x3822C000, 0x38230000, 0x38234000, 0x38238000, 0x3823C000, - 0x38240000, 0x38244000, 0x38248000, 0x3824C000, 0x38250000, 0x38254000, 0x38258000, 0x3825C000, 0x38260000, 0x38264000, 0x38268000, 0x3826C000, 0x38270000, 0x38274000, 0x38278000, 0x3827C000, - 0x38280000, 0x38284000, 0x38288000, 0x3828C000, 0x38290000, 0x38294000, 0x38298000, 0x3829C000, 0x382A0000, 0x382A4000, 0x382A8000, 0x382AC000, 0x382B0000, 0x382B4000, 0x382B8000, 0x382BC000, - 0x382C0000, 0x382C4000, 0x382C8000, 0x382CC000, 0x382D0000, 0x382D4000, 0x382D8000, 0x382DC000, 0x382E0000, 0x382E4000, 0x382E8000, 0x382EC000, 0x382F0000, 0x382F4000, 0x382F8000, 0x382FC000, - 0x38300000, 0x38304000, 0x38308000, 0x3830C000, 0x38310000, 0x38314000, 0x38318000, 0x3831C000, 0x38320000, 0x38324000, 0x38328000, 0x3832C000, 0x38330000, 0x38334000, 0x38338000, 0x3833C000, - 0x38340000, 0x38344000, 0x38348000, 0x3834C000, 0x38350000, 0x38354000, 0x38358000, 0x3835C000, 0x38360000, 0x38364000, 0x38368000, 0x3836C000, 0x38370000, 0x38374000, 0x38378000, 0x3837C000, - 0x38380000, 0x38384000, 0x38388000, 0x3838C000, 0x38390000, 0x38394000, 0x38398000, 0x3839C000, 0x383A0000, 0x383A4000, 0x383A8000, 0x383AC000, 0x383B0000, 0x383B4000, 0x383B8000, 0x383BC000, - 0x383C0000, 0x383C4000, 0x383C8000, 0x383CC000, 0x383D0000, 0x383D4000, 0x383D8000, 0x383DC000, 0x383E0000, 0x383E4000, 0x383E8000, 0x383EC000, 0x383F0000, 0x383F4000, 0x383F8000, 0x383FC000, - 0x38400000, 0x38404000, 0x38408000, 0x3840C000, 0x38410000, 0x38414000, 0x38418000, 0x3841C000, 0x38420000, 0x38424000, 0x38428000, 0x3842C000, 0x38430000, 0x38434000, 0x38438000, 0x3843C000, - 0x38440000, 0x38444000, 0x38448000, 0x3844C000, 0x38450000, 0x38454000, 0x38458000, 0x3845C000, 0x38460000, 0x38464000, 0x38468000, 0x3846C000, 0x38470000, 0x38474000, 0x38478000, 0x3847C000, - 0x38480000, 0x38484000, 0x38488000, 0x3848C000, 0x38490000, 0x38494000, 0x38498000, 0x3849C000, 0x384A0000, 0x384A4000, 0x384A8000, 0x384AC000, 0x384B0000, 0x384B4000, 0x384B8000, 0x384BC000, - 0x384C0000, 0x384C4000, 0x384C8000, 0x384CC000, 0x384D0000, 0x384D4000, 0x384D8000, 0x384DC000, 0x384E0000, 0x384E4000, 0x384E8000, 0x384EC000, 0x384F0000, 0x384F4000, 0x384F8000, 0x384FC000, - 0x38500000, 0x38504000, 0x38508000, 0x3850C000, 0x38510000, 0x38514000, 0x38518000, 0x3851C000, 
0x38520000, 0x38524000, 0x38528000, 0x3852C000, 0x38530000, 0x38534000, 0x38538000, 0x3853C000, - 0x38540000, 0x38544000, 0x38548000, 0x3854C000, 0x38550000, 0x38554000, 0x38558000, 0x3855C000, 0x38560000, 0x38564000, 0x38568000, 0x3856C000, 0x38570000, 0x38574000, 0x38578000, 0x3857C000, - 0x38580000, 0x38584000, 0x38588000, 0x3858C000, 0x38590000, 0x38594000, 0x38598000, 0x3859C000, 0x385A0000, 0x385A4000, 0x385A8000, 0x385AC000, 0x385B0000, 0x385B4000, 0x385B8000, 0x385BC000, - 0x385C0000, 0x385C4000, 0x385C8000, 0x385CC000, 0x385D0000, 0x385D4000, 0x385D8000, 0x385DC000, 0x385E0000, 0x385E4000, 0x385E8000, 0x385EC000, 0x385F0000, 0x385F4000, 0x385F8000, 0x385FC000, - 0x38600000, 0x38604000, 0x38608000, 0x3860C000, 0x38610000, 0x38614000, 0x38618000, 0x3861C000, 0x38620000, 0x38624000, 0x38628000, 0x3862C000, 0x38630000, 0x38634000, 0x38638000, 0x3863C000, - 0x38640000, 0x38644000, 0x38648000, 0x3864C000, 0x38650000, 0x38654000, 0x38658000, 0x3865C000, 0x38660000, 0x38664000, 0x38668000, 0x3866C000, 0x38670000, 0x38674000, 0x38678000, 0x3867C000, - 0x38680000, 0x38684000, 0x38688000, 0x3868C000, 0x38690000, 0x38694000, 0x38698000, 0x3869C000, 0x386A0000, 0x386A4000, 0x386A8000, 0x386AC000, 0x386B0000, 0x386B4000, 0x386B8000, 0x386BC000, - 0x386C0000, 0x386C4000, 0x386C8000, 0x386CC000, 0x386D0000, 0x386D4000, 0x386D8000, 0x386DC000, 0x386E0000, 0x386E4000, 0x386E8000, 0x386EC000, 0x386F0000, 0x386F4000, 0x386F8000, 0x386FC000, - 0x38700000, 0x38704000, 0x38708000, 0x3870C000, 0x38710000, 0x38714000, 0x38718000, 0x3871C000, 0x38720000, 0x38724000, 0x38728000, 0x3872C000, 0x38730000, 0x38734000, 0x38738000, 0x3873C000, - 0x38740000, 0x38744000, 0x38748000, 0x3874C000, 0x38750000, 0x38754000, 0x38758000, 0x3875C000, 0x38760000, 0x38764000, 0x38768000, 0x3876C000, 0x38770000, 0x38774000, 0x38778000, 0x3877C000, - 0x38780000, 0x38784000, 0x38788000, 0x3878C000, 0x38790000, 0x38794000, 0x38798000, 0x3879C000, 0x387A0000, 0x387A4000, 0x387A8000, 0x387AC000, 0x387B0000, 0x387B4000, 0x387B8000, 0x387BC000, - 0x387C0000, 0x387C4000, 0x387C8000, 0x387CC000, 0x387D0000, 0x387D4000, 0x387D8000, 0x387DC000, 0x387E0000, 0x387E4000, 0x387E8000, 0x387EC000, 0x387F0000, 0x387F4000, 0x387F8000, 0x387FC000, - 0x38000000, 0x38002000, 0x38004000, 0x38006000, 0x38008000, 0x3800A000, 0x3800C000, 0x3800E000, 0x38010000, 0x38012000, 0x38014000, 0x38016000, 0x38018000, 0x3801A000, 0x3801C000, 0x3801E000, - 0x38020000, 0x38022000, 0x38024000, 0x38026000, 0x38028000, 0x3802A000, 0x3802C000, 0x3802E000, 0x38030000, 0x38032000, 0x38034000, 0x38036000, 0x38038000, 0x3803A000, 0x3803C000, 0x3803E000, - 0x38040000, 0x38042000, 0x38044000, 0x38046000, 0x38048000, 0x3804A000, 0x3804C000, 0x3804E000, 0x38050000, 0x38052000, 0x38054000, 0x38056000, 0x38058000, 0x3805A000, 0x3805C000, 0x3805E000, - 0x38060000, 0x38062000, 0x38064000, 0x38066000, 0x38068000, 0x3806A000, 0x3806C000, 0x3806E000, 0x38070000, 0x38072000, 0x38074000, 0x38076000, 0x38078000, 0x3807A000, 0x3807C000, 0x3807E000, - 0x38080000, 0x38082000, 0x38084000, 0x38086000, 0x38088000, 0x3808A000, 0x3808C000, 0x3808E000, 0x38090000, 0x38092000, 0x38094000, 0x38096000, 0x38098000, 0x3809A000, 0x3809C000, 0x3809E000, - 0x380A0000, 0x380A2000, 0x380A4000, 0x380A6000, 0x380A8000, 0x380AA000, 0x380AC000, 0x380AE000, 0x380B0000, 0x380B2000, 0x380B4000, 0x380B6000, 0x380B8000, 0x380BA000, 0x380BC000, 0x380BE000, - 0x380C0000, 0x380C2000, 0x380C4000, 0x380C6000, 0x380C8000, 0x380CA000, 0x380CC000, 0x380CE000, 0x380D0000, 0x380D2000, 0x380D4000, 0x380D6000, 0x380D8000, 
0x380DA000, 0x380DC000, 0x380DE000, - 0x380E0000, 0x380E2000, 0x380E4000, 0x380E6000, 0x380E8000, 0x380EA000, 0x380EC000, 0x380EE000, 0x380F0000, 0x380F2000, 0x380F4000, 0x380F6000, 0x380F8000, 0x380FA000, 0x380FC000, 0x380FE000, - 0x38100000, 0x38102000, 0x38104000, 0x38106000, 0x38108000, 0x3810A000, 0x3810C000, 0x3810E000, 0x38110000, 0x38112000, 0x38114000, 0x38116000, 0x38118000, 0x3811A000, 0x3811C000, 0x3811E000, - 0x38120000, 0x38122000, 0x38124000, 0x38126000, 0x38128000, 0x3812A000, 0x3812C000, 0x3812E000, 0x38130000, 0x38132000, 0x38134000, 0x38136000, 0x38138000, 0x3813A000, 0x3813C000, 0x3813E000, - 0x38140000, 0x38142000, 0x38144000, 0x38146000, 0x38148000, 0x3814A000, 0x3814C000, 0x3814E000, 0x38150000, 0x38152000, 0x38154000, 0x38156000, 0x38158000, 0x3815A000, 0x3815C000, 0x3815E000, - 0x38160000, 0x38162000, 0x38164000, 0x38166000, 0x38168000, 0x3816A000, 0x3816C000, 0x3816E000, 0x38170000, 0x38172000, 0x38174000, 0x38176000, 0x38178000, 0x3817A000, 0x3817C000, 0x3817E000, - 0x38180000, 0x38182000, 0x38184000, 0x38186000, 0x38188000, 0x3818A000, 0x3818C000, 0x3818E000, 0x38190000, 0x38192000, 0x38194000, 0x38196000, 0x38198000, 0x3819A000, 0x3819C000, 0x3819E000, - 0x381A0000, 0x381A2000, 0x381A4000, 0x381A6000, 0x381A8000, 0x381AA000, 0x381AC000, 0x381AE000, 0x381B0000, 0x381B2000, 0x381B4000, 0x381B6000, 0x381B8000, 0x381BA000, 0x381BC000, 0x381BE000, - 0x381C0000, 0x381C2000, 0x381C4000, 0x381C6000, 0x381C8000, 0x381CA000, 0x381CC000, 0x381CE000, 0x381D0000, 0x381D2000, 0x381D4000, 0x381D6000, 0x381D8000, 0x381DA000, 0x381DC000, 0x381DE000, - 0x381E0000, 0x381E2000, 0x381E4000, 0x381E6000, 0x381E8000, 0x381EA000, 0x381EC000, 0x381EE000, 0x381F0000, 0x381F2000, 0x381F4000, 0x381F6000, 0x381F8000, 0x381FA000, 0x381FC000, 0x381FE000, - 0x38200000, 0x38202000, 0x38204000, 0x38206000, 0x38208000, 0x3820A000, 0x3820C000, 0x3820E000, 0x38210000, 0x38212000, 0x38214000, 0x38216000, 0x38218000, 0x3821A000, 0x3821C000, 0x3821E000, - 0x38220000, 0x38222000, 0x38224000, 0x38226000, 0x38228000, 0x3822A000, 0x3822C000, 0x3822E000, 0x38230000, 0x38232000, 0x38234000, 0x38236000, 0x38238000, 0x3823A000, 0x3823C000, 0x3823E000, - 0x38240000, 0x38242000, 0x38244000, 0x38246000, 0x38248000, 0x3824A000, 0x3824C000, 0x3824E000, 0x38250000, 0x38252000, 0x38254000, 0x38256000, 0x38258000, 0x3825A000, 0x3825C000, 0x3825E000, - 0x38260000, 0x38262000, 0x38264000, 0x38266000, 0x38268000, 0x3826A000, 0x3826C000, 0x3826E000, 0x38270000, 0x38272000, 0x38274000, 0x38276000, 0x38278000, 0x3827A000, 0x3827C000, 0x3827E000, - 0x38280000, 0x38282000, 0x38284000, 0x38286000, 0x38288000, 0x3828A000, 0x3828C000, 0x3828E000, 0x38290000, 0x38292000, 0x38294000, 0x38296000, 0x38298000, 0x3829A000, 0x3829C000, 0x3829E000, - 0x382A0000, 0x382A2000, 0x382A4000, 0x382A6000, 0x382A8000, 0x382AA000, 0x382AC000, 0x382AE000, 0x382B0000, 0x382B2000, 0x382B4000, 0x382B6000, 0x382B8000, 0x382BA000, 0x382BC000, 0x382BE000, - 0x382C0000, 0x382C2000, 0x382C4000, 0x382C6000, 0x382C8000, 0x382CA000, 0x382CC000, 0x382CE000, 0x382D0000, 0x382D2000, 0x382D4000, 0x382D6000, 0x382D8000, 0x382DA000, 0x382DC000, 0x382DE000, - 0x382E0000, 0x382E2000, 0x382E4000, 0x382E6000, 0x382E8000, 0x382EA000, 0x382EC000, 0x382EE000, 0x382F0000, 0x382F2000, 0x382F4000, 0x382F6000, 0x382F8000, 0x382FA000, 0x382FC000, 0x382FE000, - 0x38300000, 0x38302000, 0x38304000, 0x38306000, 0x38308000, 0x3830A000, 0x3830C000, 0x3830E000, 0x38310000, 0x38312000, 0x38314000, 0x38316000, 0x38318000, 0x3831A000, 0x3831C000, 0x3831E000, - 0x38320000, 0x38322000, 
0x38324000, 0x38326000, 0x38328000, 0x3832A000, 0x3832C000, 0x3832E000, 0x38330000, 0x38332000, 0x38334000, 0x38336000, 0x38338000, 0x3833A000, 0x3833C000, 0x3833E000, - 0x38340000, 0x38342000, 0x38344000, 0x38346000, 0x38348000, 0x3834A000, 0x3834C000, 0x3834E000, 0x38350000, 0x38352000, 0x38354000, 0x38356000, 0x38358000, 0x3835A000, 0x3835C000, 0x3835E000, - 0x38360000, 0x38362000, 0x38364000, 0x38366000, 0x38368000, 0x3836A000, 0x3836C000, 0x3836E000, 0x38370000, 0x38372000, 0x38374000, 0x38376000, 0x38378000, 0x3837A000, 0x3837C000, 0x3837E000, - 0x38380000, 0x38382000, 0x38384000, 0x38386000, 0x38388000, 0x3838A000, 0x3838C000, 0x3838E000, 0x38390000, 0x38392000, 0x38394000, 0x38396000, 0x38398000, 0x3839A000, 0x3839C000, 0x3839E000, - 0x383A0000, 0x383A2000, 0x383A4000, 0x383A6000, 0x383A8000, 0x383AA000, 0x383AC000, 0x383AE000, 0x383B0000, 0x383B2000, 0x383B4000, 0x383B6000, 0x383B8000, 0x383BA000, 0x383BC000, 0x383BE000, - 0x383C0000, 0x383C2000, 0x383C4000, 0x383C6000, 0x383C8000, 0x383CA000, 0x383CC000, 0x383CE000, 0x383D0000, 0x383D2000, 0x383D4000, 0x383D6000, 0x383D8000, 0x383DA000, 0x383DC000, 0x383DE000, - 0x383E0000, 0x383E2000, 0x383E4000, 0x383E6000, 0x383E8000, 0x383EA000, 0x383EC000, 0x383EE000, 0x383F0000, 0x383F2000, 0x383F4000, 0x383F6000, 0x383F8000, 0x383FA000, 0x383FC000, 0x383FE000, - 0x38400000, 0x38402000, 0x38404000, 0x38406000, 0x38408000, 0x3840A000, 0x3840C000, 0x3840E000, 0x38410000, 0x38412000, 0x38414000, 0x38416000, 0x38418000, 0x3841A000, 0x3841C000, 0x3841E000, - 0x38420000, 0x38422000, 0x38424000, 0x38426000, 0x38428000, 0x3842A000, 0x3842C000, 0x3842E000, 0x38430000, 0x38432000, 0x38434000, 0x38436000, 0x38438000, 0x3843A000, 0x3843C000, 0x3843E000, - 0x38440000, 0x38442000, 0x38444000, 0x38446000, 0x38448000, 0x3844A000, 0x3844C000, 0x3844E000, 0x38450000, 0x38452000, 0x38454000, 0x38456000, 0x38458000, 0x3845A000, 0x3845C000, 0x3845E000, - 0x38460000, 0x38462000, 0x38464000, 0x38466000, 0x38468000, 0x3846A000, 0x3846C000, 0x3846E000, 0x38470000, 0x38472000, 0x38474000, 0x38476000, 0x38478000, 0x3847A000, 0x3847C000, 0x3847E000, - 0x38480000, 0x38482000, 0x38484000, 0x38486000, 0x38488000, 0x3848A000, 0x3848C000, 0x3848E000, 0x38490000, 0x38492000, 0x38494000, 0x38496000, 0x38498000, 0x3849A000, 0x3849C000, 0x3849E000, - 0x384A0000, 0x384A2000, 0x384A4000, 0x384A6000, 0x384A8000, 0x384AA000, 0x384AC000, 0x384AE000, 0x384B0000, 0x384B2000, 0x384B4000, 0x384B6000, 0x384B8000, 0x384BA000, 0x384BC000, 0x384BE000, - 0x384C0000, 0x384C2000, 0x384C4000, 0x384C6000, 0x384C8000, 0x384CA000, 0x384CC000, 0x384CE000, 0x384D0000, 0x384D2000, 0x384D4000, 0x384D6000, 0x384D8000, 0x384DA000, 0x384DC000, 0x384DE000, - 0x384E0000, 0x384E2000, 0x384E4000, 0x384E6000, 0x384E8000, 0x384EA000, 0x384EC000, 0x384EE000, 0x384F0000, 0x384F2000, 0x384F4000, 0x384F6000, 0x384F8000, 0x384FA000, 0x384FC000, 0x384FE000, - 0x38500000, 0x38502000, 0x38504000, 0x38506000, 0x38508000, 0x3850A000, 0x3850C000, 0x3850E000, 0x38510000, 0x38512000, 0x38514000, 0x38516000, 0x38518000, 0x3851A000, 0x3851C000, 0x3851E000, - 0x38520000, 0x38522000, 0x38524000, 0x38526000, 0x38528000, 0x3852A000, 0x3852C000, 0x3852E000, 0x38530000, 0x38532000, 0x38534000, 0x38536000, 0x38538000, 0x3853A000, 0x3853C000, 0x3853E000, - 0x38540000, 0x38542000, 0x38544000, 0x38546000, 0x38548000, 0x3854A000, 0x3854C000, 0x3854E000, 0x38550000, 0x38552000, 0x38554000, 0x38556000, 0x38558000, 0x3855A000, 0x3855C000, 0x3855E000, - 0x38560000, 0x38562000, 0x38564000, 0x38566000, 0x38568000, 0x3856A000, 0x3856C000, 
0x3856E000, 0x38570000, 0x38572000, 0x38574000, 0x38576000, 0x38578000, 0x3857A000, 0x3857C000, 0x3857E000, - 0x38580000, 0x38582000, 0x38584000, 0x38586000, 0x38588000, 0x3858A000, 0x3858C000, 0x3858E000, 0x38590000, 0x38592000, 0x38594000, 0x38596000, 0x38598000, 0x3859A000, 0x3859C000, 0x3859E000, - 0x385A0000, 0x385A2000, 0x385A4000, 0x385A6000, 0x385A8000, 0x385AA000, 0x385AC000, 0x385AE000, 0x385B0000, 0x385B2000, 0x385B4000, 0x385B6000, 0x385B8000, 0x385BA000, 0x385BC000, 0x385BE000, - 0x385C0000, 0x385C2000, 0x385C4000, 0x385C6000, 0x385C8000, 0x385CA000, 0x385CC000, 0x385CE000, 0x385D0000, 0x385D2000, 0x385D4000, 0x385D6000, 0x385D8000, 0x385DA000, 0x385DC000, 0x385DE000, - 0x385E0000, 0x385E2000, 0x385E4000, 0x385E6000, 0x385E8000, 0x385EA000, 0x385EC000, 0x385EE000, 0x385F0000, 0x385F2000, 0x385F4000, 0x385F6000, 0x385F8000, 0x385FA000, 0x385FC000, 0x385FE000, - 0x38600000, 0x38602000, 0x38604000, 0x38606000, 0x38608000, 0x3860A000, 0x3860C000, 0x3860E000, 0x38610000, 0x38612000, 0x38614000, 0x38616000, 0x38618000, 0x3861A000, 0x3861C000, 0x3861E000, - 0x38620000, 0x38622000, 0x38624000, 0x38626000, 0x38628000, 0x3862A000, 0x3862C000, 0x3862E000, 0x38630000, 0x38632000, 0x38634000, 0x38636000, 0x38638000, 0x3863A000, 0x3863C000, 0x3863E000, - 0x38640000, 0x38642000, 0x38644000, 0x38646000, 0x38648000, 0x3864A000, 0x3864C000, 0x3864E000, 0x38650000, 0x38652000, 0x38654000, 0x38656000, 0x38658000, 0x3865A000, 0x3865C000, 0x3865E000, - 0x38660000, 0x38662000, 0x38664000, 0x38666000, 0x38668000, 0x3866A000, 0x3866C000, 0x3866E000, 0x38670000, 0x38672000, 0x38674000, 0x38676000, 0x38678000, 0x3867A000, 0x3867C000, 0x3867E000, - 0x38680000, 0x38682000, 0x38684000, 0x38686000, 0x38688000, 0x3868A000, 0x3868C000, 0x3868E000, 0x38690000, 0x38692000, 0x38694000, 0x38696000, 0x38698000, 0x3869A000, 0x3869C000, 0x3869E000, - 0x386A0000, 0x386A2000, 0x386A4000, 0x386A6000, 0x386A8000, 0x386AA000, 0x386AC000, 0x386AE000, 0x386B0000, 0x386B2000, 0x386B4000, 0x386B6000, 0x386B8000, 0x386BA000, 0x386BC000, 0x386BE000, - 0x386C0000, 0x386C2000, 0x386C4000, 0x386C6000, 0x386C8000, 0x386CA000, 0x386CC000, 0x386CE000, 0x386D0000, 0x386D2000, 0x386D4000, 0x386D6000, 0x386D8000, 0x386DA000, 0x386DC000, 0x386DE000, - 0x386E0000, 0x386E2000, 0x386E4000, 0x386E6000, 0x386E8000, 0x386EA000, 0x386EC000, 0x386EE000, 0x386F0000, 0x386F2000, 0x386F4000, 0x386F6000, 0x386F8000, 0x386FA000, 0x386FC000, 0x386FE000, - 0x38700000, 0x38702000, 0x38704000, 0x38706000, 0x38708000, 0x3870A000, 0x3870C000, 0x3870E000, 0x38710000, 0x38712000, 0x38714000, 0x38716000, 0x38718000, 0x3871A000, 0x3871C000, 0x3871E000, - 0x38720000, 0x38722000, 0x38724000, 0x38726000, 0x38728000, 0x3872A000, 0x3872C000, 0x3872E000, 0x38730000, 0x38732000, 0x38734000, 0x38736000, 0x38738000, 0x3873A000, 0x3873C000, 0x3873E000, - 0x38740000, 0x38742000, 0x38744000, 0x38746000, 0x38748000, 0x3874A000, 0x3874C000, 0x3874E000, 0x38750000, 0x38752000, 0x38754000, 0x38756000, 0x38758000, 0x3875A000, 0x3875C000, 0x3875E000, - 0x38760000, 0x38762000, 0x38764000, 0x38766000, 0x38768000, 0x3876A000, 0x3876C000, 0x3876E000, 0x38770000, 0x38772000, 0x38774000, 0x38776000, 0x38778000, 0x3877A000, 0x3877C000, 0x3877E000, - 0x38780000, 0x38782000, 0x38784000, 0x38786000, 0x38788000, 0x3878A000, 0x3878C000, 0x3878E000, 0x38790000, 0x38792000, 0x38794000, 0x38796000, 0x38798000, 0x3879A000, 0x3879C000, 0x3879E000, - 0x387A0000, 0x387A2000, 0x387A4000, 0x387A6000, 0x387A8000, 0x387AA000, 0x387AC000, 0x387AE000, 0x387B0000, 0x387B2000, 0x387B4000, 0x387B6000, 
0x387B8000, 0x387BA000, 0x387BC000, 0x387BE000, - 0x387C0000, 0x387C2000, 0x387C4000, 0x387C6000, 0x387C8000, 0x387CA000, 0x387CC000, 0x387CE000, 0x387D0000, 0x387D2000, 0x387D4000, 0x387D6000, 0x387D8000, 0x387DA000, 0x387DC000, 0x387DE000, - 0x387E0000, 0x387E2000, 0x387E4000, 0x387E6000, 0x387E8000, 0x387EA000, 0x387EC000, 0x387EE000, 0x387F0000, 0x387F2000, 0x387F4000, 0x387F6000, 0x387F8000, 0x387FA000, 0x387FC000, 0x387FE000 }; - static const uint32 exponent_table[64] = { - 0x00000000, 0x00800000, 0x01000000, 0x01800000, 0x02000000, 0x02800000, 0x03000000, 0x03800000, 0x04000000, 0x04800000, 0x05000000, 0x05800000, 0x06000000, 0x06800000, 0x07000000, 0x07800000, - 0x08000000, 0x08800000, 0x09000000, 0x09800000, 0x0A000000, 0x0A800000, 0x0B000000, 0x0B800000, 0x0C000000, 0x0C800000, 0x0D000000, 0x0D800000, 0x0E000000, 0x0E800000, 0x0F000000, 0x47800000, - 0x80000000, 0x80800000, 0x81000000, 0x81800000, 0x82000000, 0x82800000, 0x83000000, 0x83800000, 0x84000000, 0x84800000, 0x85000000, 0x85800000, 0x86000000, 0x86800000, 0x87000000, 0x87800000, - 0x88000000, 0x88800000, 0x89000000, 0x89800000, 0x8A000000, 0x8A800000, 0x8B000000, 0x8B800000, 0x8C000000, 0x8C800000, 0x8D000000, 0x8D800000, 0x8E000000, 0x8E800000, 0x8F000000, 0xC7800000 }; - static const unsigned short offset_table[64] = { - 0, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, - 0, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024, 1024 }; - uint32 bits = mantissa_table[offset_table[value>>10]+(value&0x3FF)] + exponent_table[value>>10]; -// return *reinterpret_cast(&bits); //violating strict aliasing! - float out; - std::memcpy(&out, &bits, sizeof(float)); - return out; - } - - /// Convert half-precision to IEEE double-precision. - /// \param value binary representation of half-precision value - /// \return double-precision value - inline double half2float_impl(uint16 value, double, true_type) - { - typedef bits::type uint32; - typedef bits::type uint64; - uint32 hi = static_cast(value&0x8000) << 16; - int abs = value & 0x7FFF; - if(abs) - { - hi |= 0x3F000000 << static_cast(abs>=0x7C00); - for(; abs<0x400; abs<<=1,hi-=0x100000) ; - hi += static_cast(abs) << 10; - } - uint64 bits = static_cast(hi) << 32; -// return *reinterpret_cast(&bits); //violating strict aliasing! - double out; - std::memcpy(&out, &bits, sizeof(double)); - return out; - } - - /// Convert half-precision to non-IEEE floating point. - /// \tparam T type to convert to (builtin integer type) - /// \param value binary representation of half-precision value - /// \return floating point value - template T half2float_impl(uint16 value, T, ...) - { - T out; - int abs = value & 0x7FFF; - if(abs > 0x7C00) - out = std::numeric_limits::has_quiet_NaN ? std::numeric_limits::quiet_NaN() : T(); - else if(abs == 0x7C00) - out = std::numeric_limits::has_infinity ? std::numeric_limits::infinity() : std::numeric_limits::max(); - else if(abs > 0x3FF) - out = std::ldexp(static_cast((abs&0x3FF)|0x400), (abs>>10)-25); - else - out = std::ldexp(static_cast(abs), -24); - return (value&0x8000) ? -out : out; - } - - /// Convert half-precision to floating point. 
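// [Editor's note] Hedged illustration, not part of half.hpp: the mantissa/exponent/offset
// tables above implement a branch-free IEEE binary16 -> binary32 conversion. The sketch
// below performs the same conversion with explicit bit manipulation, assuming an IEEE-754
// `float`; the name `half_bits_to_float` is invented for this example.
#include <cstdint>
#include <cstring>

static float half_bits_to_float(std::uint16_t h)
{
    std::uint32_t sign = static_cast<std::uint32_t>(h & 0x8000u) << 16;  // sign to bit 31
    std::uint32_t abs  = h & 0x7FFFu;                                    // exponent + mantissa
    std::uint32_t bits;
    if(abs >= 0x7C00u)            // Inf or NaN: all-ones exponent, keep the payload
        bits = sign | 0x7F800000u | (static_cast<std::uint32_t>(abs & 0x3FFu) << 13);
    else if(abs >= 0x0400u)       // normal: re-bias exponent from 15 to 127, widen mantissa
        bits = sign | ((abs + ((127u - 15u) << 10)) << 13);
    else if(abs != 0u)            // subnormal: renormalize, adjusting the exponent per shift
    {
        unsigned shift = 0;
        while((abs & 0x0400u) == 0u) { abs <<= 1; ++shift; }
        bits = sign | ((113u - shift) << 23) | ((abs & 0x3FFu) << 13);
    }
    else                          // signed zero
        bits = sign;
    float out;
    std::memcpy(&out, &bits, sizeof(out));  // same strict-aliasing-safe copy used above
    return out;
}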
- /// \tparam T type to convert to (builtin integer type) - /// \param value binary representation of half-precision value - /// \return floating point value - template T half2float(uint16 value) - { - return half2float_impl(value, T(), bool_type::is_iec559&&sizeof(typename bits::type)==sizeof(T)>()); - } - - /// Convert half-precision floating point to integer. - /// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding - /// \tparam E `true` for round to even, `false` for round away from zero - /// \tparam T type to convert to (builtin integer type with at least 16 bits precision, excluding any implicit sign bits) - /// \param value binary representation of half-precision value - /// \return integral value - template T half2int_impl(uint16 value) - { - #if HALF_ENABLE_CPP11_STATIC_ASSERT && HALF_ENABLE_CPP11_TYPE_TRAITS - static_assert(std::is_integral::value, "half to int conversion only supports builtin integer types"); - #endif - unsigned int e = value & 0x7FFF; - if(e >= 0x7C00) - return (value&0x8000) ? std::numeric_limits::min() : std::numeric_limits::max(); - if(e < 0x3800) - { - if(R == std::round_toward_infinity) - return T(~(value>>15)&(e!=0)); - else if(R == std::round_toward_neg_infinity) - return -T(value>0x8000); - return T(); - } - unsigned int m = (value&0x3FF) | 0x400; - e >>= 10; - if(e < 25) - { - if(R == std::round_to_nearest) - m += (1<<(24-e)) - (~(m>>(25-e))&E); - else if(R == std::round_toward_infinity) - m += ((value>>15)-1) & ((1<<(25-e))-1U); - else if(R == std::round_toward_neg_infinity) - m += -(value>>15) & ((1<<(25-e))-1U); - m >>= 25 - e; - } - else - m <<= e - 25; - return (value&0x8000) ? -static_cast(m) : static_cast(m); - } - - /// Convert half-precision floating point to integer. - /// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding - /// \tparam T type to convert to (builtin integer type with at least 16 bits precision, excluding any implicit sign bits) - /// \param value binary representation of half-precision value - /// \return integral value - template T half2int(uint16 value) { return half2int_impl(value); } - - /// Convert half-precision floating point to integer using round-to-nearest-away-from-zero. - /// \tparam T type to convert to (builtin integer type with at least 16 bits precision, excluding any implicit sign bits) - /// \param value binary representation of half-precision value - /// \return integral value - template T half2int_up(uint16 value) { return half2int_impl(value); } - - /// Round half-precision number to nearest integer value. 
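// [Editor's note] Hedged illustration, not part of half.hpp: half2int_impl above selects the
// rounding behaviour from the template parameters R and E at compile time. This sketch shows
// only the round-toward-zero (truncating) case; `half_bits_to_int_trunc` is an invented name.
#include <cstdint>
#include <limits>

static int half_bits_to_int_trunc(std::uint16_t h)
{
    unsigned int e = h & 0x7FFF;
    if(e >= 0x7C00)                      // NaN / infinity: saturate, like the original
        return (h & 0x8000) ? std::numeric_limits<int>::min()
                            : std::numeric_limits<int>::max();
    if(e < 0x3C00)                       // |value| < 1 truncates to zero
        return 0;
    unsigned int m = (h & 0x3FF) | 0x400;              // significand with implicit leading 1
    e >>= 10;                                          // biased exponent, 15..30
    int result = (e < 25) ? static_cast<int>(m >> (25 - e))   // drop fractional bits
                          : static_cast<int>(m << (e - 25));  // scale up large magnitudes
    return (h & 0x8000) ? -result : result;
}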
- /// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding - /// \tparam E `true` for round to even, `false` for round away from zero - /// \param value binary representation of half-precision value - /// \return half-precision bits for nearest integral value - template uint16 round_half_impl(uint16 value) - { - unsigned int e = value & 0x7FFF; - uint16 result = value; - if(e < 0x3C00) - { - result &= 0x8000; - if(R == std::round_to_nearest) - result |= 0x3C00U & -(e>=(0x3800+E)); - else if(R == std::round_toward_infinity) - result |= 0x3C00U & -(~(value>>15)&(e!=0)); - else if(R == std::round_toward_neg_infinity) - result |= 0x3C00U & -(value>0x8000); - } - else if(e < 0x6400) - { - e = 25 - (e>>10); - unsigned int mask = (1<>e)&E); - else if(R == std::round_toward_infinity) - result += mask & ((value>>15)-1); - else if(R == std::round_toward_neg_infinity) - result += mask & -(value>>15); - result &= ~mask; - } - return result; - } - - /// Round half-precision number to nearest integer value. - /// \tparam R rounding mode to use, `std::round_indeterminate` for fastest rounding - /// \param value binary representation of half-precision value - /// \return half-precision bits for nearest integral value - template uint16 round_half(uint16 value) { return round_half_impl(value); } - - /// Round half-precision number to nearest integer value using round-to-nearest-away-from-zero. - /// \param value binary representation of half-precision value - /// \return half-precision bits for nearest integral value - inline uint16 round_half_up(uint16 value) { return round_half_impl(value); } - /// \} - - struct functions; - template struct unary_specialized; - template struct binary_specialized; - template struct half_caster; - } - - /// Half-precision floating point type. - /// This class implements an IEEE-conformant half-precision floating point type with the usual arithmetic operators and - /// conversions. It is implicitly convertible to single-precision floating point, which makes arithmetic expressions and - /// functions with mixed-type operands to be of the most precise operand type. Additionally all arithmetic operations - /// (and many mathematical functions) are carried out in single-precision internally. All conversions from single- to - /// half-precision are done using the library's default rounding mode, but temporary results inside chained arithmetic - /// expressions are kept in single-precision as long as possible (while of course still maintaining a strong half-precision type). - /// - /// According to the C++98/03 definition, the half type is not a POD type. But according to C++11's less strict and - /// extended definitions it is both a standard layout type and a trivially copyable type (even if not a POD type), which - /// means it can be standard-conformantly copied using raw binary copies. But in this context some more words about the - /// actual size of the type. Although the half is representing an IEEE 16-bit type, it does not necessarily have to be of - /// exactly 16-bits size. But on any reasonable implementation the actual binary representation of this type will most - /// probably not ivolve any additional "magic" or padding beyond the simple binary representation of the underlying 16-bit - /// IEEE number, even if not strictly guaranteed by the standard. But even then it only has an actual size of 16 bits if - /// your C++ implementation supports an unsigned integer type of exactly 16 bits width. 
But this should be the case on - /// nearly any reasonable platform. - /// - /// So if your C++ implementation is not totally exotic or imposes special alignment requirements, it is a reasonable - /// assumption that the data of a half is just comprised of the 2 bytes of the underlying IEEE representation. - class half - { - friend struct detail::functions; - friend struct detail::unary_specialized; - friend struct detail::binary_specialized; - template friend struct detail::half_caster; - friend class std::numeric_limits; - #if HALF_ENABLE_CPP11_HASH - friend struct std::hash; - #endif - #if HALF_ENABLE_CPP11_USER_LITERALS - friend half literal::operator""_h(long double); - #endif - - public: - /// Default constructor. - /// This initializes the half to 0. Although this does not match the builtin types' default-initialization semantics - /// and may be less efficient than no initialization, it is needed to provide proper value-initialization semantics. - HALF_CONSTEXPR half() HALF_NOEXCEPT : data_() {} - - /// Copy constructor. - /// \tparam T type of concrete half expression - /// \param rhs half expression to copy from - half(detail::expr rhs) : data_(detail::float2half(static_cast(rhs))) {} - - /// Conversion constructor. - /// \param rhs float to convert - explicit half(float rhs) : data_(detail::float2half(rhs)) {} - - /// Conversion to single-precision. - /// \return single precision value representing expression value - operator float() const { return detail::half2float(data_); } - - /// Assignment operator. - /// \tparam T type of concrete half expression - /// \param rhs half expression to copy from - /// \return reference to this half - half& operator=(detail::expr rhs) { return *this = static_cast(rhs); } - - /// Arithmetic assignment. - /// \tparam T type of concrete half expression - /// \param rhs half expression to add - /// \return reference to this half - template typename detail::enable::type operator+=(T rhs) { return *this += static_cast(rhs); } - - /// Arithmetic assignment. - /// \tparam T type of concrete half expression - /// \param rhs half expression to subtract - /// \return reference to this half - template typename detail::enable::type operator-=(T rhs) { return *this -= static_cast(rhs); } - - /// Arithmetic assignment. - /// \tparam T type of concrete half expression - /// \param rhs half expression to multiply with - /// \return reference to this half - template typename detail::enable::type operator*=(T rhs) { return *this *= static_cast(rhs); } - - /// Arithmetic assignment. - /// \tparam T type of concrete half expression - /// \param rhs half expression to divide by - /// \return reference to this half - template typename detail::enable::type operator/=(T rhs) { return *this /= static_cast(rhs); } - - /// Assignment operator. - /// \param rhs single-precision value to copy from - /// \return reference to this half - half& operator=(float rhs) { data_ = detail::float2half(rhs); return *this; } - - /// Arithmetic assignment. - /// \param rhs single-precision value to add - /// \return reference to this half - half& operator+=(float rhs) { data_ = detail::float2half(detail::half2float(data_)+rhs); return *this; } - - /// Arithmetic assignment. - /// \param rhs single-precision value to subtract - /// \return reference to this half - half& operator-=(float rhs) { data_ = detail::float2half(detail::half2float(data_)-rhs); return *this; } - - /// Arithmetic assignment. 
- /// \param rhs single-precision value to multiply with - /// \return reference to this half - half& operator*=(float rhs) { data_ = detail::float2half(detail::half2float(data_)*rhs); return *this; } - - /// Arithmetic assignment. - /// \param rhs single-precision value to divide by - /// \return reference to this half - half& operator/=(float rhs) { data_ = detail::float2half(detail::half2float(data_)/rhs); return *this; } - - /// Prefix increment. - /// \return incremented half value - half& operator++() { return *this += 1.0f; } - - /// Prefix decrement. - /// \return decremented half value - half& operator--() { return *this -= 1.0f; } - - /// Postfix increment. - /// \return non-incremented half value - half operator++(int) { half out(*this); ++*this; return out; } - - /// Postfix decrement. - /// \return non-decremented half value - half operator--(int) { half out(*this); --*this; return out; } - - private: - /// Rounding mode to use - static const std::float_round_style round_style = (std::float_round_style)(HALF_ROUND_STYLE); - - /// Constructor. - /// \param bits binary representation to set half to - HALF_CONSTEXPR half(detail::binary_t, detail::uint16 bits) HALF_NOEXCEPT : data_(bits) {} - - /// Internal binary representation - detail::uint16 data_; - }; - -#if HALF_ENABLE_CPP11_USER_LITERALS - namespace literal - { - /// Half literal. - /// While this returns an actual half-precision value, half literals can unfortunately not be constant expressions due - /// to rather involved conversions. - /// \param value literal value - /// \return half with given value (if representable) - inline half operator""_h(long double value) { return half(detail::binary, detail::float2half(value)); } - } -#endif - - namespace detail - { - /// Wrapper implementing unspecialized half-precision functions. - struct functions - { - /// Addition implementation. - /// \param x first operand - /// \param y second operand - /// \return Half-precision sum stored in single-precision - static expr plus(float x, float y) { return expr(x+y); } - - /// Subtraction implementation. - /// \param x first operand - /// \param y second operand - /// \return Half-precision difference stored in single-precision - static expr minus(float x, float y) { return expr(x-y); } - - /// Multiplication implementation. - /// \param x first operand - /// \param y second operand - /// \return Half-precision product stored in single-precision - static expr multiplies(float x, float y) { return expr(x*y); } - - /// Division implementation. - /// \param x first operand - /// \param y second operand - /// \return Half-precision quotient stored in single-precision - static expr divides(float x, float y) { return expr(x/y); } - - /// Output implementation. - /// \param out stream to write to - /// \param arg value to write - /// \return reference to stream - template static std::basic_ostream& write(std::basic_ostream &out, float arg) { return out << arg; } - - /// Input implementation. - /// \param in stream to read from - /// \param arg half to read into - /// \return reference to stream - template static std::basic_istream& read(std::basic_istream &in, half &arg) - { - float f; - if(in >> f) - arg = f; - return in; - } - - /// Modulo implementation. - /// \param x first operand - /// \param y second operand - /// \return Half-precision division remainder stored in single-precision - static expr fmod(float x, float y) { return expr(std::fmod(x, y)); } - - /// Remainder implementation. 
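// [Editor's note] Hedged usage sketch of the `half` class defined above: arithmetic operands
// are promoted to single precision internally and the result is rounded back to binary16 on
// assignment. Assumes this header is available as "half.hpp" and exposes half_float::half.
#include <cstdio>
#include "half.hpp"

int main()
{
    using half_float::half;
    half a(3.5f), b(0.25f);
    half c = a * b + half(1.0f);              // computed in float, rounded back to half
    float f = c;                              // implicit conversion back to single precision
    std::printf("3.5 * 0.25 + 1 = %g\n", f);  // 1.875 is exactly representable in binary16
    return 0;
}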
- /// \param x first operand - /// \param y second operand - /// \return Half-precision division remainder stored in single-precision - static expr remainder(float x, float y) - { - #if HALF_ENABLE_CPP11_CMATH - return expr(std::remainder(x, y)); - #else - if(builtin_isnan(x) || builtin_isnan(y)) - return expr(std::numeric_limits::quiet_NaN()); - float ax = std::fabs(x), ay = std::fabs(y); - if(ax >= 65536.0f || ay < std::ldexp(1.0f, -24)) - return expr(std::numeric_limits::quiet_NaN()); - if(ay >= 65536.0f) - return expr(x); - if(ax == ay) - return expr(builtin_signbit(x) ? -0.0f : 0.0f); - ax = std::fmod(ax, ay+ay); - float y2 = 0.5f * ay; - if(ax > y2) - { - ax -= ay; - if(ax >= y2) - ax -= ay; - } - return expr(builtin_signbit(x) ? -ax : ax); - #endif - } - - /// Remainder implementation. - /// \param x first operand - /// \param y second operand - /// \param quo address to store quotient bits at - /// \return Half-precision division remainder stored in single-precision - static expr remquo(float x, float y, int *quo) - { - #if HALF_ENABLE_CPP11_CMATH - return expr(std::remquo(x, y, quo)); - #else - if(builtin_isnan(x) || builtin_isnan(y)) - return expr(std::numeric_limits::quiet_NaN()); - bool sign = builtin_signbit(x), qsign = static_cast(sign^builtin_signbit(y)); - float ax = std::fabs(x), ay = std::fabs(y); - if(ax >= 65536.0f || ay < std::ldexp(1.0f, -24)) - return expr(std::numeric_limits::quiet_NaN()); - if(ay >= 65536.0f) - return expr(x); - if(ax == ay) - return *quo = qsign ? -1 : 1, expr(sign ? -0.0f : 0.0f); - ax = std::fmod(ax, 8.0f*ay); - int cquo = 0; - if(ax >= 4.0f * ay) - { - ax -= 4.0f * ay; - cquo += 4; - } - if(ax >= 2.0f * ay) - { - ax -= 2.0f * ay; - cquo += 2; - } - float y2 = 0.5f * ay; - if(ax > y2) - { - ax -= ay; - ++cquo; - if(ax >= y2) - { - ax -= ay; - ++cquo; - } - } - return *quo = qsign ? -cquo : cquo, expr(sign ? -ax : ax); - #endif - } - - /// Positive difference implementation. - /// \param x first operand - /// \param y second operand - /// \return Positive difference stored in single-precision - static expr fdim(float x, float y) - { - #if HALF_ENABLE_CPP11_CMATH - return expr(std::fdim(x, y)); - #else - return expr((x<=y) ? 0.0f : (x-y)); - #endif - } - - /// Fused multiply-add implementation. - /// \param x first operand - /// \param y second operand - /// \param z third operand - /// \return \a x * \a y + \a z stored in single-precision - static expr fma(float x, float y, float z) - { - #if HALF_ENABLE_CPP11_CMATH && defined(FP_FAST_FMAF) - return expr(std::fma(x, y, z)); - #else - return expr(x*y+z); - #endif - } - - /// Get NaN. - /// \return Half-precision quiet NaN - static half nanh() { return half(binary, 0x7FFF); } - - /// Exponential implementation. - /// \param arg function argument - /// \return function value stored in single-preicision - static expr exp(float arg) { return expr(std::exp(arg)); } - - /// Exponential implementation. - /// \param arg function argument - /// \return function value stored in single-preicision - static expr expm1(float arg) - { - #if HALF_ENABLE_CPP11_CMATH - return expr(std::expm1(arg)); - #else - return expr(static_cast(std::exp(static_cast(arg))-1.0)); - #endif - } - - /// Binary exponential implementation. 
- /// \param arg function argument - /// \return function value stored in single-preicision - static expr exp2(float arg) - { - #if HALF_ENABLE_CPP11_CMATH - return expr(std::exp2(arg)); - #else - return expr(static_cast(std::exp(arg*0.69314718055994530941723212145818))); - #endif - } - - /// Logarithm implementation. - /// \param arg function argument - /// \return function value stored in single-preicision - static expr log(float arg) { return expr(std::log(arg)); } - - /// Common logarithm implementation. - /// \param arg function argument - /// \return function value stored in single-preicision - static expr log10(float arg) { return expr(std::log10(arg)); } - - /// Logarithm implementation. - /// \param arg function argument - /// \return function value stored in single-preicision - static expr log1p(float arg) - { - #if HALF_ENABLE_CPP11_CMATH - return expr(std::log1p(arg)); - #else - return expr(static_cast(std::log(1.0+arg))); - #endif - } - - /// Binary logarithm implementation. - /// \param arg function argument - /// \return function value stored in single-preicision - static expr log2(float arg) - { - #if HALF_ENABLE_CPP11_CMATH - return expr(std::log2(arg)); - #else - return expr(static_cast(std::log(static_cast(arg))*1.4426950408889634073599246810019)); - #endif - } - - /// Square root implementation. - /// \param arg function argument - /// \return function value stored in single-preicision - static expr sqrt(float arg) { return expr(std::sqrt(arg)); } - - /// Cubic root implementation. - /// \param arg function argument - /// \return function value stored in single-preicision - static expr cbrt(float arg) - { - #if HALF_ENABLE_CPP11_CMATH - return expr(std::cbrt(arg)); - #else - if(builtin_isnan(arg) || builtin_isinf(arg)) - return expr(arg); - return expr(builtin_signbit(arg) ? -static_cast(std::pow(-static_cast(arg), 1.0/3.0)) : - static_cast(std::pow(static_cast(arg), 1.0/3.0))); - #endif - } - - /// Hypotenuse implementation. - /// \param x first argument - /// \param y second argument - /// \return function value stored in single-preicision - static expr hypot(float x, float y) - { - #if HALF_ENABLE_CPP11_CMATH - return expr(std::hypot(x, y)); - #else - return expr((builtin_isinf(x) || builtin_isinf(y)) ? std::numeric_limits::infinity() : - static_cast(std::sqrt(static_cast(x)*x+static_cast(y)*y))); - #endif - } - - /// Power implementation. - /// \param base value to exponentiate - /// \param exp power to expontiate to - /// \return function value stored in single-preicision - static expr pow(float base, float exp) { return expr(std::pow(base, exp)); } - - /// Sine implementation. - /// \param arg function argument - /// \return function value stored in single-preicision - static expr sin(float arg) { return expr(std::sin(arg)); } - - /// Cosine implementation. - /// \param arg function argument - /// \return function value stored in single-preicision - static expr cos(float arg) { return expr(std::cos(arg)); } - - /// Tan implementation. - /// \param arg function argument - /// \return function value stored in single-preicision - static expr tan(float arg) { return expr(std::tan(arg)); } - - /// Arc sine implementation. - /// \param arg function argument - /// \return function value stored in single-preicision - static expr asin(float arg) { return expr(std::asin(arg)); } - - /// Arc cosine implementation. 
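// [Editor's note] Hedged illustration, not part of half.hpp: the pre-C++11 hypot fallback
// above evaluates sqrt(x*x + y*y) in double, so the intermediate sum cannot overflow for the
// float values that half arithmetic produces. A standalone restatement of that idea:
#include <cmath>
#include <limits>

static float hypot_via_double(float x, float y)
{
    if(std::isinf(x) || std::isinf(y))                 // hypot(inf, anything) is +inf
        return std::numeric_limits<float>::infinity();
    double dx = x, dy = y;                             // widen before squaring
    return static_cast<float>(std::sqrt(dx * dx + dy * dy));
}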
- /// \param arg function argument - /// \return function value stored in single-preicision - static expr acos(float arg) { return expr(std::acos(arg)); } - - /// Arc tangent implementation. - /// \param arg function argument - /// \return function value stored in single-preicision - static expr atan(float arg) { return expr(std::atan(arg)); } - - /// Arc tangent implementation. - /// \param x first argument - /// \param y second argument - /// \return function value stored in single-preicision - static expr atan2(float x, float y) { return expr(std::atan2(x, y)); } - - /// Hyperbolic sine implementation. - /// \param arg function argument - /// \return function value stored in single-preicision - static expr sinh(float arg) { return expr(std::sinh(arg)); } - - /// Hyperbolic cosine implementation. - /// \param arg function argument - /// \return function value stored in single-preicision - static expr cosh(float arg) { return expr(std::cosh(arg)); } - - /// Hyperbolic tangent implementation. - /// \param arg function argument - /// \return function value stored in single-preicision - static expr tanh(float arg) { return expr(std::tanh(arg)); } - - /// Hyperbolic area sine implementation. - /// \param arg function argument - /// \return function value stored in single-preicision - static expr asinh(float arg) - { - #if HALF_ENABLE_CPP11_CMATH - return expr(std::asinh(arg)); - #else - return expr((arg==-std::numeric_limits::infinity()) ? arg : static_cast(std::log(arg+std::sqrt(arg*arg+1.0)))); - #endif - } - - /// Hyperbolic area cosine implementation. - /// \param arg function argument - /// \return function value stored in single-preicision - static expr acosh(float arg) - { - #if HALF_ENABLE_CPP11_CMATH - return expr(std::acosh(arg)); - #else - return expr((arg<-1.0f) ? std::numeric_limits::quiet_NaN() : static_cast(std::log(arg+std::sqrt(arg*arg-1.0)))); - #endif - } - - /// Hyperbolic area tangent implementation. - /// \param arg function argument - /// \return function value stored in single-preicision - static expr atanh(float arg) - { - #if HALF_ENABLE_CPP11_CMATH - return expr(std::atanh(arg)); - #else - return expr(static_cast(0.5*std::log((1.0+arg)/(1.0-arg)))); - #endif - } - - /// Error function implementation. - /// \param arg function argument - /// \return function value stored in single-preicision - static expr erf(float arg) - { - #if HALF_ENABLE_CPP11_CMATH - return expr(std::erf(arg)); - #else - return expr(static_cast(erf(static_cast(arg)))); - #endif - } - - /// Complementary implementation. - /// \param arg function argument - /// \return function value stored in single-preicision - static expr erfc(float arg) - { - #if HALF_ENABLE_CPP11_CMATH - return expr(std::erfc(arg)); - #else - return expr(static_cast(1.0-erf(static_cast(arg)))); - #endif - } - - /// Gamma logarithm implementation. - /// \param arg function argument - /// \return function value stored in single-preicision - static expr lgamma(float arg) - { - #if HALF_ENABLE_CPP11_CMATH - return expr(std::lgamma(arg)); - #else - if(builtin_isinf(arg)) - return expr(std::numeric_limits::infinity()); - if(arg < 0.0f) - { - float i, f = std::modf(-arg, &i); - if(f == 0.0f) - return expr(std::numeric_limits::infinity()); - return expr(static_cast(1.1447298858494001741434273513531- - std::log(std::abs(std::sin(3.1415926535897932384626433832795*f)))-lgamma(1.0-arg))); - } - return expr(static_cast(lgamma(static_cast(arg)))); - #endif - } - - /// Gamma implementation. 
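// [Editor's note] Hedged illustration, not part of half.hpp: the lgamma fallback above handles
// negative arguments with the reflection formula  lgamma(x) = log(pi/|sin(pi*x)|) - lgamma(1-x)
// (the constant 1.1447298858494001741... above is log(pi)). A standalone sketch that delegates
// the positive part to C++11 std::lgamma; `lgamma_reflected` is an invented name.
#include <cmath>
#include <limits>

static double lgamma_reflected(double x)
{
    if(x > 0.0)
        return std::lgamma(x);
    double ipart, frac = std::modf(-x, &ipart);
    if(frac == 0.0)                                    // poles at 0, -1, -2, ...
        return std::numeric_limits<double>::infinity();
    const double pi = 3.14159265358979323846;
    return std::log(pi / std::fabs(std::sin(pi * x))) - std::lgamma(1.0 - x);
}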
- /// \param arg function argument - /// \return function value stored in single-preicision - static expr tgamma(float arg) - { - #if HALF_ENABLE_CPP11_CMATH - return expr(std::tgamma(arg)); - #else - if(arg == 0.0f) - return builtin_signbit(arg) ? expr(-std::numeric_limits::infinity()) : expr(std::numeric_limits::infinity()); - if(arg < 0.0f) - { - float i, f = std::modf(-arg, &i); - if(f == 0.0f) - return expr(std::numeric_limits::quiet_NaN()); - double value = 3.1415926535897932384626433832795 / (std::sin(3.1415926535897932384626433832795*f)*std::exp(lgamma(1.0-arg))); - return expr(static_cast((std::fmod(i, 2.0f)==0.0f) ? -value : value)); - } - if(builtin_isinf(arg)) - return expr(arg); - return expr(static_cast(std::exp(lgamma(static_cast(arg))))); - #endif - } - - /// Floor implementation. - /// \param arg value to round - /// \return rounded value - static half floor(half arg) { return half(binary, round_half(arg.data_)); } - - /// Ceiling implementation. - /// \param arg value to round - /// \return rounded value - static half ceil(half arg) { return half(binary, round_half(arg.data_)); } - - /// Truncation implementation. - /// \param arg value to round - /// \return rounded value - static half trunc(half arg) { return half(binary, round_half(arg.data_)); } - - /// Nearest integer implementation. - /// \param arg value to round - /// \return rounded value - static half round(half arg) { return half(binary, round_half_up(arg.data_)); } - - /// Nearest integer implementation. - /// \param arg value to round - /// \return rounded value - static long lround(half arg) { return detail::half2int_up(arg.data_); } - - /// Nearest integer implementation. - /// \param arg value to round - /// \return rounded value - static half rint(half arg) { return half(binary, round_half(arg.data_)); } - - /// Nearest integer implementation. - /// \param arg value to round - /// \return rounded value - static long lrint(half arg) { return detail::half2int(arg.data_); } - - #if HALF_ENABLE_CPP11_LONG_LONG - /// Nearest integer implementation. - /// \param arg value to round - /// \return rounded value - static long long llround(half arg) { return detail::half2int_up(arg.data_); } - - /// Nearest integer implementation. - /// \param arg value to round - /// \return rounded value - static long long llrint(half arg) { return detail::half2int(arg.data_); } - #endif - - /// Decompression implementation. - /// \param arg number to decompress - /// \param exp address to store exponent at - /// \return normalized significant - static half frexp(half arg, int *exp) - { - int m = arg.data_ & 0x7FFF, e = -14; - if(m >= 0x7C00 || !m) - return *exp = 0, arg; - for(; m<0x400; m<<=1,--e) ; - return *exp = e+(m>>10), half(binary, (arg.data_&0x8000)|0x3800|(m&0x3FF)); - } - - /// Decompression implementation. - /// \param arg number to decompress - /// \param iptr address to store integer part at - /// \return fractional part - static half modf(half arg, half *iptr) - { - unsigned int e = arg.data_ & 0x7FFF; - if(e >= 0x6400) - return *iptr = arg, half(binary, arg.data_&(0x8000U|-(e>0x7C00))); - if(e < 0x3C00) - return iptr->data_ = arg.data_ & 0x8000, arg; - e >>= 10; - unsigned int mask = (1<<(25-e)) - 1, m = arg.data_ & mask; - iptr->data_ = arg.data_ & ~mask; - if(!m) - return half(binary, arg.data_&0x8000); - for(; m<0x400; m<<=1,--e) ; - return half(binary, static_cast((arg.data_&0x8000)|(e<<10)|(m&0x3FF))); - } - - /// Scaling implementation. 
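// [Editor's note] Hedged restatement, not part of half.hpp: frexp above never leaves the
// binary16 encoding. It renormalizes subnormals, reports the unbiased exponent, and re-biases
// the stored exponent field to 0x3800 (= 2^-1) so the returned significand lies in [0.5, 1).
// `half_frexp_bits` is an invented name for this standalone sketch.
#include <cstdint>

static std::uint16_t half_frexp_bits(std::uint16_t h, int *exp)
{
    int m = h & 0x7FFF, e = -14;
    if(m >= 0x7C00 || m == 0)             // Inf, NaN, or zero: exponent 0, value unchanged
        return *exp = 0, h;
    for(; m < 0x400; m <<= 1, --e) ;      // renormalize subnormal significands
    *exp = e + (m >> 10);                 // unbiased exponent of the original value
    return static_cast<std::uint16_t>((h & 0x8000) | 0x3800 | (m & 0x3FF));
}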
- /// \param arg number to scale - /// \param exp power of two to scale by - /// \return scaled number - static half scalbln(half arg, long exp) - { - unsigned int m = arg.data_ & 0x7FFF; - if(m >= 0x7C00 || !m) - return arg; - for(; m<0x400; m<<=1,--exp) ; - exp += m >> 10; - uint16 value = arg.data_ & 0x8000; - if(exp > 30) - { - if(half::round_style == std::round_toward_zero) - value |= 0x7BFF; - else if(half::round_style == std::round_toward_infinity) - value |= 0x7C00 - (value>>15); - else if(half::round_style == std::round_toward_neg_infinity) - value |= 0x7BFF + (value>>15); - else - value |= 0x7C00; - } - else if(exp > 0) - value |= (exp<<10) | (m&0x3FF); - else if(exp > -11) - { - m = (m&0x3FF) | 0x400; - if(half::round_style == std::round_to_nearest) - { - m += 1 << -exp; - #if HALF_ROUND_TIES_TO_EVEN - m -= (m>>(1-exp)) & 1; - #endif - } - else if(half::round_style == std::round_toward_infinity) - m += ((value>>15)-1) & ((1<<(1-exp))-1U); - else if(half::round_style == std::round_toward_neg_infinity) - m += -(value>>15) & ((1<<(1-exp))-1U); - value |= m >> (1-exp); - } - else if(half::round_style == std::round_toward_infinity) - value -= (value>>15) - 1; - else if(half::round_style == std::round_toward_neg_infinity) - value += value >> 15; - return half(binary, value); - } - - /// Exponent implementation. - /// \param arg number to query - /// \return floating point exponent - static int ilogb(half arg) - { - int abs = arg.data_ & 0x7FFF; - if(!abs) - return FP_ILOGB0; - if(abs < 0x7C00) - { - int exp = (abs>>10) - 15; - if(abs < 0x400) - for(; abs<0x200; abs<<=1,--exp) ; - return exp; - } - if(abs > 0x7C00) - return FP_ILOGBNAN; - return INT_MAX; - } - - /// Exponent implementation. - /// \param arg number to query - /// \return floating point exponent - static half logb(half arg) - { - int abs = arg.data_ & 0x7FFF; - if(!abs) - return half(binary, 0xFC00); - if(abs < 0x7C00) - { - int exp = (abs>>10) - 15; - if(abs < 0x400) - for(; abs<0x200; abs<<=1,--exp) ; - uint16 bits = (exp<0) << 15; - if(exp) - { - unsigned int m = std::abs(exp) << 6, e = 18; - for(; m<0x400; m<<=1,--e) ; - bits |= (e<<10) + m; - } - return half(binary, bits); - } - if(abs > 0x7C00) - return arg; - return half(binary, 0x7C00); - } - - /// Enumeration implementation. - /// \param from number to increase/decrease - /// \param to direction to enumerate into - /// \return next representable number - static half nextafter(half from, half to) - { - uint16 fabs = from.data_ & 0x7FFF, tabs = to.data_ & 0x7FFF; - if(fabs > 0x7C00) - return from; - if(tabs > 0x7C00 || from.data_ == to.data_ || !(fabs|tabs)) - return to; - if(!fabs) - return half(binary, (to.data_&0x8000)+1); - bool lt = ((fabs==from.data_) ? static_cast(fabs) : -static_cast(fabs)) < - ((tabs==to.data_) ? static_cast(tabs) : -static_cast(tabs)); - return half(binary, from.data_+(((from.data_>>15)^static_cast(lt))<<1)-1); - } - - /// Enumeration implementation. - /// \param from number to increase/decrease - /// \param to direction to enumerate into - /// \return next representable number - static half nexttoward(half from, long double to) - { - if(isnan(from)) - return from; - long double lfrom = static_cast(from); - if(builtin_isnan(to) || lfrom == to) - return half(static_cast(to)); - if(!(from.data_&0x7FFF)) - return half(binary, (static_cast(builtin_signbit(to))<<15)+1); - return half(binary, from.data_+(((from.data_>>15)^static_cast(lfrom0x3FF) ? ((abs>=0x7C00) ? ((abs>0x7C00) ? 
FP_NAN : FP_INFINITE) : FP_NORMAL) :FP_SUBNORMAL) : FP_ZERO; - } - - /// Classification implementation. - /// \param arg value to classify - /// \retval true if finite number - /// \retval false else - static bool isfinite(half arg) { return (arg.data_&0x7C00) != 0x7C00; } - - /// Classification implementation. - /// \param arg value to classify - /// \retval true if infinite number - /// \retval false else - static bool isinf(half arg) { return (arg.data_&0x7FFF) == 0x7C00; } - - /// Classification implementation. - /// \param arg value to classify - /// \retval true if not a number - /// \retval false else - static bool isnan(half arg) { return (arg.data_&0x7FFF) > 0x7C00; } - - /// Classification implementation. - /// \param arg value to classify - /// \retval true if normal number - /// \retval false else - static bool isnormal(half arg) { return ((arg.data_&0x7C00)!=0) & ((arg.data_&0x7C00)!=0x7C00); } - - /// Sign bit implementation. - /// \param arg value to check - /// \retval true if signed - /// \retval false if unsigned - static bool signbit(half arg) { return (arg.data_&0x8000) != 0; } - - /// Comparison implementation. - /// \param x first operand - /// \param y second operand - /// \retval true if operands equal - /// \retval false else - static bool isequal(half x, half y) { return (x.data_==y.data_ || !((x.data_|y.data_)&0x7FFF)) && !isnan(x); } - - /// Comparison implementation. - /// \param x first operand - /// \param y second operand - /// \retval true if operands not equal - /// \retval false else - static bool isnotequal(half x, half y) { return (x.data_!=y.data_ && ((x.data_|y.data_)&0x7FFF)) || isnan(x); } - - /// Comparison implementation. - /// \param x first operand - /// \param y second operand - /// \retval true if \a x > \a y - /// \retval false else - static bool isgreater(half x, half y) - { - int xabs = x.data_ & 0x7FFF, yabs = y.data_ & 0x7FFF; - return xabs<=0x7C00 && yabs<=0x7C00 && (((xabs==x.data_) ? xabs : -xabs) > ((yabs==y.data_) ? yabs : -yabs)); - } - - /// Comparison implementation. - /// \param x first operand - /// \param y second operand - /// \retval true if \a x >= \a y - /// \retval false else - static bool isgreaterequal(half x, half y) - { - int xabs = x.data_ & 0x7FFF, yabs = y.data_ & 0x7FFF; - return xabs<=0x7C00 && yabs<=0x7C00 && (((xabs==x.data_) ? xabs : -xabs) >= ((yabs==y.data_) ? yabs : -yabs)); - } - - /// Comparison implementation. - /// \param x first operand - /// \param y second operand - /// \retval true if \a x < \a y - /// \retval false else - static bool isless(half x, half y) - { - int xabs = x.data_ & 0x7FFF, yabs = y.data_ & 0x7FFF; - return xabs<=0x7C00 && yabs<=0x7C00 && (((xabs==x.data_) ? xabs : -xabs) < ((yabs==y.data_) ? yabs : -yabs)); - } - - /// Comparison implementation. - /// \param x first operand - /// \param y second operand - /// \retval true if \a x <= \a y - /// \retval false else - static bool islessequal(half x, half y) - { - int xabs = x.data_ & 0x7FFF, yabs = y.data_ & 0x7FFF; - return xabs<=0x7C00 && yabs<=0x7C00 && (((xabs==x.data_) ? xabs : -xabs) <= ((yabs==y.data_) ? yabs : -yabs)); - } - - /// Comparison implementation. - /// \param x first operand - /// \param y second operand - /// \retval true if either \a x > \a y nor \a x < \a y - /// \retval false else - static bool islessgreater(half x, half y) - { - int xabs = x.data_ & 0x7FFF, yabs = y.data_ & 0x7FFF; - if(xabs > 0x7C00 || yabs > 0x7C00) - return false; - int a = (xabs==x.data_) ? xabs : -xabs, b = (yabs==y.data_) ? 
yabs : -yabs; - return a < b || a > b; - } - - /// Comparison implementation. - /// \param x first operand - /// \param y second operand - /// \retval true if operand unordered - /// \retval false else - static bool isunordered(half x, half y) { return isnan(x) || isnan(y); } - - private: - static double erf(double arg) - { - if(builtin_isinf(arg)) - return (arg<0.0) ? -1.0 : 1.0; - double x2 = arg * arg, ax2 = 0.147 * x2, value = std::sqrt(1.0-std::exp(-x2*(1.2732395447351626861510701069801+ax2)/(1.0+ax2))); - return builtin_signbit(arg) ? -value : value; - } - - static double lgamma(double arg) - { - double v = 1.0; - for(; arg<8.0; ++arg) v *= arg; - double w = 1.0 / (arg*arg); - return (((((((-0.02955065359477124183006535947712*w+0.00641025641025641025641025641026)*w+ - -0.00191752691752691752691752691753)*w+8.4175084175084175084175084175084e-4)*w+ - -5.952380952380952380952380952381e-4)*w+7.9365079365079365079365079365079e-4)*w+ - -0.00277777777777777777777777777778)*w+0.08333333333333333333333333333333)/arg + - 0.91893853320467274178032973640562 - std::log(v) - arg + (arg-0.5) * std::log(arg); - } - }; - - /// Wrapper for unary half-precision functions needing specialization for individual argument types. - /// \tparam T argument type - template struct unary_specialized - { - /// Negation implementation. - /// \param arg value to negate - /// \return negated value - static HALF_CONSTEXPR half negate(half arg) { return half(binary, arg.data_^0x8000); } - - /// Absolute value implementation. - /// \param arg function argument - /// \return absolute value - static half fabs(half arg) { return half(binary, arg.data_&0x7FFF); } - }; - template<> struct unary_specialized - { - static HALF_CONSTEXPR expr negate(float arg) { return expr(-arg); } - static expr fabs(float arg) { return expr(std::fabs(arg)); } - }; - - /// Wrapper for binary half-precision functions needing specialization for individual argument types. - /// \tparam T first argument type - /// \tparam U first argument type - template struct binary_specialized - { - /// Minimum implementation. - /// \param x first operand - /// \param y second operand - /// \return minimum value - static expr fmin(float x, float y) - { - #if HALF_ENABLE_CPP11_CMATH - return expr(std::fmin(x, y)); - #else - if(builtin_isnan(x)) - return expr(y); - if(builtin_isnan(y)) - return expr(x); - return expr(std::min(x, y)); - #endif - } - - /// Maximum implementation. - /// \param x first operand - /// \param y second operand - /// \return maximum value - static expr fmax(float x, float y) - { - #if HALF_ENABLE_CPP11_CMATH - return expr(std::fmax(x, y)); - #else - if(builtin_isnan(x)) - return expr(y); - if(builtin_isnan(y)) - return expr(x); - return expr(std::max(x, y)); - #endif - } - }; - template<> struct binary_specialized - { - static half fmin(half x, half y) - { - int xabs = x.data_ & 0x7FFF, yabs = y.data_ & 0x7FFF; - if(xabs > 0x7C00) - return y; - if(yabs > 0x7C00) - return x; - return (((xabs==x.data_) ? xabs : -xabs) > ((yabs==y.data_) ? yabs : -yabs)) ? y : x; - } - static half fmax(half x, half y) - { - int xabs = x.data_ & 0x7FFF, yabs = y.data_ & 0x7FFF; - if(xabs > 0x7C00) - return y; - if(yabs > 0x7C00) - return x; - return (((xabs==x.data_) ? xabs : -xabs) < ((yabs==y.data_) ? yabs : -yabs)) ? y : x; - } - }; - - /// Helper class for half casts. 
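// [Editor's note] Hedged illustration, not part of half.hpp: the comparisons and fmin/fmax
// above never convert to float. They map the sign-magnitude binary16 encoding onto ordinary
// signed-integer order (positive values compare as +magnitude, negative values as -magnitude)
// after screening out NaNs. `half_bits_less` is an invented name for this standalone sketch.
#include <cstdint>

static bool half_bits_less(std::uint16_t x, std::uint16_t y)
{
    int xabs = x & 0x7FFF, yabs = y & 0x7FFF;
    if(xabs > 0x7C00 || yabs > 0x7C00)     // a NaN operand is unordered: never "less"
        return false;
    int xi = (x & 0x8000) ? -xabs : xabs;  // sign-magnitude -> signed integer order
    int yi = (y & 0x8000) ? -yabs : yabs;  // (both zeros map to 0, so -0 == +0 as in IEEE)
    return xi < yi;
}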
- /// This class template has to be specialized for all valid cast argument to define an appropriate static `cast` member - /// function and a corresponding `type` member denoting its return type. - /// \tparam T destination type - /// \tparam U source type - /// \tparam R rounding mode to use - template struct half_caster {}; - template struct half_caster - { - #if HALF_ENABLE_CPP11_STATIC_ASSERT && HALF_ENABLE_CPP11_TYPE_TRAITS - static_assert(std::is_arithmetic::value, "half_cast from non-arithmetic type unsupported"); - #endif - - static half cast(U arg) { return cast_impl(arg, is_float()); }; - - private: - static half cast_impl(U arg, true_type) { return half(binary, float2half(arg)); } - static half cast_impl(U arg, false_type) { return half(binary, int2half(arg)); } - }; - template struct half_caster - { - #if HALF_ENABLE_CPP11_STATIC_ASSERT && HALF_ENABLE_CPP11_TYPE_TRAITS - static_assert(std::is_arithmetic::value, "half_cast to non-arithmetic type unsupported"); - #endif - - static T cast(half arg) { return cast_impl(arg, is_float()); } - - private: - static T cast_impl(half arg, true_type) { return half2float(arg.data_); } - static T cast_impl(half arg, false_type) { return half2int(arg.data_); } - }; - template struct half_caster - { - #if HALF_ENABLE_CPP11_STATIC_ASSERT && HALF_ENABLE_CPP11_TYPE_TRAITS - static_assert(std::is_arithmetic::value, "half_cast to non-arithmetic type unsupported"); - #endif - - static T cast(expr arg) { return cast_impl(arg, is_float()); } - - private: - static T cast_impl(float arg, true_type) { return static_cast(arg); } - static T cast_impl(half arg, false_type) { return half2int(arg.data_); } - }; - template struct half_caster - { - static half cast(half arg) { return arg; } - }; - template struct half_caster : half_caster {}; - - /// \name Comparison operators - /// \{ - - /// Comparison for equality. - /// \param x first operand - /// \param y second operand - /// \retval true if operands equal - /// \retval false else - template typename enable::type operator==(T x, U y) { return functions::isequal(x, y); } - - /// Comparison for inequality. - /// \param x first operand - /// \param y second operand - /// \retval true if operands not equal - /// \retval false else - template typename enable::type operator!=(T x, U y) { return functions::isnotequal(x, y); } - - /// Comparison for less than. - /// \param x first operand - /// \param y second operand - /// \retval true if \a x less than \a y - /// \retval false else - template typename enable::type operator<(T x, U y) { return functions::isless(x, y); } - - /// Comparison for greater than. - /// \param x first operand - /// \param y second operand - /// \retval true if \a x greater than \a y - /// \retval false else - template typename enable::type operator>(T x, U y) { return functions::isgreater(x, y); } - - /// Comparison for less equal. - /// \param x first operand - /// \param y second operand - /// \retval true if \a x less equal \a y - /// \retval false else - template typename enable::type operator<=(T x, U y) { return functions::islessequal(x, y); } - - /// Comparison for greater equal. - /// \param x first operand - /// \param y second operand - /// \retval true if \a x greater equal \a y - /// \retval false else - template typename enable::type operator>=(T x, U y) { return functions::isgreaterequal(x, y); } - - /// \} - /// \name Arithmetic operators - /// \{ - - /// Add halfs. 
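// [Editor's note] Hedged illustration, not part of half.hpp: half_caster above picks the
// float path or the integer path of a cast at compile time by tag dispatch on an "is float"
// trait. The same pattern in standalone C++11, with std::lround standing in for the library's
// internal integer conversion; all names here are invented for the example.
#include <cmath>
#include <type_traits>

template<typename T, typename U>
static T numeric_cast_impl(U arg, std::true_type)   // floating-point destination
{ return static_cast<T>(arg); }

template<typename T, typename U>
static T numeric_cast_impl(U arg, std::false_type)  // integral destination: round to nearest
{ return static_cast<T>(std::lround(static_cast<double>(arg))); }

template<typename T, typename U>
static T numeric_cast(U arg)
{
    // e.g. numeric_cast<int>(3.7) == 4, numeric_cast<float>(3) == 3.0f
    return numeric_cast_impl<T>(arg, std::is_floating_point<T>());
}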
- /// \param x left operand - /// \param y right operand - /// \return sum of half expressions - template typename enable::type operator+(T x, U y) { return functions::plus(x, y); } - - /// Subtract halfs. - /// \param x left operand - /// \param y right operand - /// \return difference of half expressions - template typename enable::type operator-(T x, U y) { return functions::minus(x, y); } - - /// Multiply halfs. - /// \param x left operand - /// \param y right operand - /// \return product of half expressions - template typename enable::type operator*(T x, U y) { return functions::multiplies(x, y); } - - /// Divide halfs. - /// \param x left operand - /// \param y right operand - /// \return quotient of half expressions - template typename enable::type operator/(T x, U y) { return functions::divides(x, y); } - - /// Identity. - /// \param arg operand - /// \return unchanged operand - template HALF_CONSTEXPR typename enable::type operator+(T arg) { return arg; } - - /// Negation. - /// \param arg operand - /// \return negated operand - template HALF_CONSTEXPR typename enable::type operator-(T arg) { return unary_specialized::negate(arg); } - - /// \} - /// \name Input and output - /// \{ - - /// Output operator. - /// \param out output stream to write into - /// \param arg half expression to write - /// \return reference to output stream - template typename enable&,T>::type - operator<<(std::basic_ostream &out, T arg) { return functions::write(out, arg); } - - /// Input operator. - /// \param in input stream to read from - /// \param arg half to read into - /// \return reference to input stream - template std::basic_istream& - operator>>(std::basic_istream &in, half &arg) { return functions::read(in, arg); } - - /// \} - /// \name Basic mathematical operations - /// \{ - - /// Absolute value. - /// \param arg operand - /// \return absolute value of \a arg -// template typename enable::type abs(T arg) { return unary_specialized::fabs(arg); } - inline half abs(half arg) { return unary_specialized::fabs(arg); } - inline expr abs(expr arg) { return unary_specialized::fabs(arg); } - - /// Absolute value. - /// \param arg operand - /// \return absolute value of \a arg -// template typename enable::type fabs(T arg) { return unary_specialized::fabs(arg); } - inline half fabs(half arg) { return unary_specialized::fabs(arg); } - inline expr fabs(expr arg) { return unary_specialized::fabs(arg); } - - /// Remainder of division. - /// \param x first operand - /// \param y second operand - /// \return remainder of floating point division. -// template typename enable::type fmod(T x, U y) { return functions::fmod(x, y); } - inline expr fmod(half x, half y) { return functions::fmod(x, y); } - inline expr fmod(half x, expr y) { return functions::fmod(x, y); } - inline expr fmod(expr x, half y) { return functions::fmod(x, y); } - inline expr fmod(expr x, expr y) { return functions::fmod(x, y); } - - /// Remainder of division. - /// \param x first operand - /// \param y second operand - /// \return remainder of floating point division. -// template typename enable::type remainder(T x, U y) { return functions::remainder(x, y); } - inline expr remainder(half x, half y) { return functions::remainder(x, y); } - inline expr remainder(half x, expr y) { return functions::remainder(x, y); } - inline expr remainder(expr x, half y) { return functions::remainder(x, y); } - inline expr remainder(expr x, expr y) { return functions::remainder(x, y); } - - /// Remainder of division. 
- /// \param x first operand - /// \param y second operand - /// \param quo address to store some bits of quotient at - /// \return remainder of floating point division. -// template typename enable::type remquo(T x, U y, int *quo) { return functions::remquo(x, y, quo); } - inline expr remquo(half x, half y, int *quo) { return functions::remquo(x, y, quo); } - inline expr remquo(half x, expr y, int *quo) { return functions::remquo(x, y, quo); } - inline expr remquo(expr x, half y, int *quo) { return functions::remquo(x, y, quo); } - inline expr remquo(expr x, expr y, int *quo) { return functions::remquo(x, y, quo); } - - /// Fused multiply add. - /// \param x first operand - /// \param y second operand - /// \param z third operand - /// \return ( \a x * \a y ) + \a z rounded as one operation. -// template typename enable::type fma(T x, U y, V z) { return functions::fma(x, y, z); } - inline expr fma(half x, half y, half z) { return functions::fma(x, y, z); } - inline expr fma(half x, half y, expr z) { return functions::fma(x, y, z); } - inline expr fma(half x, expr y, half z) { return functions::fma(x, y, z); } - inline expr fma(half x, expr y, expr z) { return functions::fma(x, y, z); } - inline expr fma(expr x, half y, half z) { return functions::fma(x, y, z); } - inline expr fma(expr x, half y, expr z) { return functions::fma(x, y, z); } - inline expr fma(expr x, expr y, half z) { return functions::fma(x, y, z); } - inline expr fma(expr x, expr y, expr z) { return functions::fma(x, y, z); } - - /// Maximum of half expressions. - /// \param x first operand - /// \param y second operand - /// \return maximum of operands -// template typename result::type fmax(T x, U y) { return binary_specialized::fmax(x, y); } - inline half fmax(half x, half y) { return binary_specialized::fmax(x, y); } - inline expr fmax(half x, expr y) { return binary_specialized::fmax(x, y); } - inline expr fmax(expr x, half y) { return binary_specialized::fmax(x, y); } - inline expr fmax(expr x, expr y) { return binary_specialized::fmax(x, y); } - - /// Minimum of half expressions. - /// \param x first operand - /// \param y second operand - /// \return minimum of operands -// template typename result::type fmin(T x, U y) { return binary_specialized::fmin(x, y); } - inline half fmin(half x, half y) { return binary_specialized::fmin(x, y); } - inline expr fmin(half x, expr y) { return binary_specialized::fmin(x, y); } - inline expr fmin(expr x, half y) { return binary_specialized::fmin(x, y); } - inline expr fmin(expr x, expr y) { return binary_specialized::fmin(x, y); } - - /// Positive difference. - /// \param x first operand - /// \param y second operand - /// \return \a x - \a y or 0 if difference negative -// template typename enable::type fdim(T x, U y) { return functions::fdim(x, y); } - inline expr fdim(half x, half y) { return functions::fdim(x, y); } - inline expr fdim(half x, expr y) { return functions::fdim(x, y); } - inline expr fdim(expr x, half y) { return functions::fdim(x, y); } - inline expr fdim(expr x, expr y) { return functions::fdim(x, y); } - - /// Get NaN value. - /// \return quiet NaN - inline half nanh(const char*) { return functions::nanh(); } - - /// \} - /// \name Exponential functions - /// \{ - - /// Exponential function. 
- /// \param arg function argument - /// \return e raised to \a arg -// template typename enable::type exp(T arg) { return functions::exp(arg); } - inline expr exp(half arg) { return functions::exp(arg); } - inline expr exp(expr arg) { return functions::exp(arg); } - - /// Exponential minus one. - /// \param arg function argument - /// \return e raised to \a arg subtracted by 1 -// template typename enable::type expm1(T arg) { return functions::expm1(arg); } - inline expr expm1(half arg) { return functions::expm1(arg); } - inline expr expm1(expr arg) { return functions::expm1(arg); } - - /// Binary exponential. - /// \param arg function argument - /// \return 2 raised to \a arg -// template typename enable::type exp2(T arg) { return functions::exp2(arg); } - inline expr exp2(half arg) { return functions::exp2(arg); } - inline expr exp2(expr arg) { return functions::exp2(arg); } - - /// Natural logorithm. - /// \param arg function argument - /// \return logarithm of \a arg to base e -// template typename enable::type log(T arg) { return functions::log(arg); } - inline expr log(half arg) { return functions::log(arg); } - inline expr log(expr arg) { return functions::log(arg); } - - /// Common logorithm. - /// \param arg function argument - /// \return logarithm of \a arg to base 10 -// template typename enable::type log10(T arg) { return functions::log10(arg); } - inline expr log10(half arg) { return functions::log10(arg); } - inline expr log10(expr arg) { return functions::log10(arg); } - - /// Natural logorithm. - /// \param arg function argument - /// \return logarithm of \a arg plus 1 to base e -// template typename enable::type log1p(T arg) { return functions::log1p(arg); } - inline expr log1p(half arg) { return functions::log1p(arg); } - inline expr log1p(expr arg) { return functions::log1p(arg); } - - /// Binary logorithm. - /// \param arg function argument - /// \return logarithm of \a arg to base 2 -// template typename enable::type log2(T arg) { return functions::log2(arg); } - inline expr log2(half arg) { return functions::log2(arg); } - inline expr log2(expr arg) { return functions::log2(arg); } - - /// \} - /// \name Power functions - /// \{ - - /// Square root. - /// \param arg function argument - /// \return square root of \a arg -// template typename enable::type sqrt(T arg) { return functions::sqrt(arg); } - inline expr sqrt(half arg) { return functions::sqrt(arg); } - inline expr sqrt(expr arg) { return functions::sqrt(arg); } - - /// Cubic root. - /// \param arg function argument - /// \return cubic root of \a arg -// template typename enable::type cbrt(T arg) { return functions::cbrt(arg); } - inline expr cbrt(half arg) { return functions::cbrt(arg); } - inline expr cbrt(expr arg) { return functions::cbrt(arg); } - - /// Hypotenuse function. - /// \param x first argument - /// \param y second argument - /// \return square root of sum of squares without internal over- or underflows -// template typename enable::type hypot(T x, U y) { return functions::hypot(x, y); } - inline expr hypot(half x, half y) { return functions::hypot(x, y); } - inline expr hypot(half x, expr y) { return functions::hypot(x, y); } - inline expr hypot(expr x, half y) { return functions::hypot(x, y); } - inline expr hypot(expr x, expr y) { return functions::hypot(x, y); } - - /// Power function. 
- /// \param base first argument - /// \param exp second argument - /// \return \a base raised to \a exp -// template typename enable::type pow(T base, U exp) { return functions::pow(base, exp); } - inline expr pow(half base, half exp) { return functions::pow(base, exp); } - inline expr pow(half base, expr exp) { return functions::pow(base, exp); } - inline expr pow(expr base, half exp) { return functions::pow(base, exp); } - inline expr pow(expr base, expr exp) { return functions::pow(base, exp); } - - /// \} - /// \name Trigonometric functions - /// \{ - - /// Sine function. - /// \param arg function argument - /// \return sine value of \a arg -// template typename enable::type sin(T arg) { return functions::sin(arg); } - inline expr sin(half arg) { return functions::sin(arg); } - inline expr sin(expr arg) { return functions::sin(arg); } - - /// Cosine function. - /// \param arg function argument - /// \return cosine value of \a arg -// template typename enable::type cos(T arg) { return functions::cos(arg); } - inline expr cos(half arg) { return functions::cos(arg); } - inline expr cos(expr arg) { return functions::cos(arg); } - - /// Tangent function. - /// \param arg function argument - /// \return tangent value of \a arg -// template typename enable::type tan(T arg) { return functions::tan(arg); } - inline expr tan(half arg) { return functions::tan(arg); } - inline expr tan(expr arg) { return functions::tan(arg); } - - /// Arc sine. - /// \param arg function argument - /// \return arc sine value of \a arg -// template typename enable::type asin(T arg) { return functions::asin(arg); } - inline expr asin(half arg) { return functions::asin(arg); } - inline expr asin(expr arg) { return functions::asin(arg); } - - /// Arc cosine function. - /// \param arg function argument - /// \return arc cosine value of \a arg -// template typename enable::type acos(T arg) { return functions::acos(arg); } - inline expr acos(half arg) { return functions::acos(arg); } - inline expr acos(expr arg) { return functions::acos(arg); } - - /// Arc tangent function. - /// \param arg function argument - /// \return arc tangent value of \a arg -// template typename enable::type atan(T arg) { return functions::atan(arg); } - inline expr atan(half arg) { return functions::atan(arg); } - inline expr atan(expr arg) { return functions::atan(arg); } - - /// Arc tangent function. - /// \param x first argument - /// \param y second argument - /// \return arc tangent value -// template typename enable::type atan2(T x, U y) { return functions::atan2(x, y); } - inline expr atan2(half x, half y) { return functions::atan2(x, y); } - inline expr atan2(half x, expr y) { return functions::atan2(x, y); } - inline expr atan2(expr x, half y) { return functions::atan2(x, y); } - inline expr atan2(expr x, expr y) { return functions::atan2(x, y); } - - /// \} - /// \name Hyperbolic functions - /// \{ - - /// Hyperbolic sine. - /// \param arg function argument - /// \return hyperbolic sine value of \a arg -// template typename enable::type sinh(T arg) { return functions::sinh(arg); } - inline expr sinh(half arg) { return functions::sinh(arg); } - inline expr sinh(expr arg) { return functions::sinh(arg); } - - /// Hyperbolic cosine. 
- /// \param arg function argument - /// \return hyperbolic cosine value of \a arg -// template typename enable::type cosh(T arg) { return functions::cosh(arg); } - inline expr cosh(half arg) { return functions::cosh(arg); } - inline expr cosh(expr arg) { return functions::cosh(arg); } - - /// Hyperbolic tangent. - /// \param arg function argument - /// \return hyperbolic tangent value of \a arg -// template typename enable::type tanh(T arg) { return functions::tanh(arg); } - inline expr tanh(half arg) { return functions::tanh(arg); } - inline expr tanh(expr arg) { return functions::tanh(arg); } - - /// Hyperbolic area sine. - /// \param arg function argument - /// \return area sine value of \a arg -// template typename enable::type asinh(T arg) { return functions::asinh(arg); } - inline expr asinh(half arg) { return functions::asinh(arg); } - inline expr asinh(expr arg) { return functions::asinh(arg); } - - /// Hyperbolic area cosine. - /// \param arg function argument - /// \return area cosine value of \a arg -// template typename enable::type acosh(T arg) { return functions::acosh(arg); } - inline expr acosh(half arg) { return functions::acosh(arg); } - inline expr acosh(expr arg) { return functions::acosh(arg); } - - /// Hyperbolic area tangent. - /// \param arg function argument - /// \return area tangent value of \a arg -// template typename enable::type atanh(T arg) { return functions::atanh(arg); } - inline expr atanh(half arg) { return functions::atanh(arg); } - inline expr atanh(expr arg) { return functions::atanh(arg); } - - /// \} - /// \name Error and gamma functions - /// \{ - - /// Error function. - /// \param arg function argument - /// \return error function value of \a arg -// template typename enable::type erf(T arg) { return functions::erf(arg); } - inline expr erf(half arg) { return functions::erf(arg); } - inline expr erf(expr arg) { return functions::erf(arg); } - - /// Complementary error function. - /// \param arg function argument - /// \return 1 minus error function value of \a arg -// template typename enable::type erfc(T arg) { return functions::erfc(arg); } - inline expr erfc(half arg) { return functions::erfc(arg); } - inline expr erfc(expr arg) { return functions::erfc(arg); } - - /// Natural logarithm of gamma function. - /// \param arg function argument - /// \return natural logarith of gamma function for \a arg -// template typename enable::type lgamma(T arg) { return functions::lgamma(arg); } - inline expr lgamma(half arg) { return functions::lgamma(arg); } - inline expr lgamma(expr arg) { return functions::lgamma(arg); } - - /// Gamma function. - /// \param arg function argument - /// \return gamma function value of \a arg -// template typename enable::type tgamma(T arg) { return functions::tgamma(arg); } - inline expr tgamma(half arg) { return functions::tgamma(arg); } - inline expr tgamma(expr arg) { return functions::tgamma(arg); } - - /// \} - /// \name Rounding - /// \{ - - /// Nearest integer not less than half value. - /// \param arg half to round - /// \return nearest integer not less than \a arg -// template typename enable::type ceil(T arg) { return functions::ceil(arg); } - inline half ceil(half arg) { return functions::ceil(arg); } - inline half ceil(expr arg) { return functions::ceil(arg); } - - /// Nearest integer not greater than half value. 
- /// \param arg half to round - /// \return nearest integer not greater than \a arg -// template typename enable::type floor(T arg) { return functions::floor(arg); } - inline half floor(half arg) { return functions::floor(arg); } - inline half floor(expr arg) { return functions::floor(arg); } - - /// Nearest integer not greater in magnitude than half value. - /// \param arg half to round - /// \return nearest integer not greater in magnitude than \a arg -// template typename enable::type trunc(T arg) { return functions::trunc(arg); } - inline half trunc(half arg) { return functions::trunc(arg); } - inline half trunc(expr arg) { return functions::trunc(arg); } - - /// Nearest integer. - /// \param arg half to round - /// \return nearest integer, rounded away from zero in half-way cases -// template typename enable::type round(T arg) { return functions::round(arg); } - inline half round(half arg) { return functions::round(arg); } - inline half round(expr arg) { return functions::round(arg); } - - /// Nearest integer. - /// \param arg half to round - /// \return nearest integer, rounded away from zero in half-way cases -// template typename enable::type lround(T arg) { return functions::lround(arg); } - inline long lround(half arg) { return functions::lround(arg); } - inline long lround(expr arg) { return functions::lround(arg); } - - /// Nearest integer using half's internal rounding mode. - /// \param arg half expression to round - /// \return nearest integer using default rounding mode -// template typename enable::type nearbyint(T arg) { return functions::nearbyint(arg); } - inline half nearbyint(half arg) { return functions::rint(arg); } - inline half nearbyint(expr arg) { return functions::rint(arg); } - - /// Nearest integer using half's internal rounding mode. - /// \param arg half expression to round - /// \return nearest integer using default rounding mode -// template typename enable::type rint(T arg) { return functions::rint(arg); } - inline half rint(half arg) { return functions::rint(arg); } - inline half rint(expr arg) { return functions::rint(arg); } - - /// Nearest integer using half's internal rounding mode. - /// \param arg half expression to round - /// \return nearest integer using default rounding mode -// template typename enable::type lrint(T arg) { return functions::lrint(arg); } - inline long lrint(half arg) { return functions::lrint(arg); } - inline long lrint(expr arg) { return functions::lrint(arg); } - #if HALF_ENABLE_CPP11_LONG_LONG - /// Nearest integer. - /// \param arg half to round - /// \return nearest integer, rounded away from zero in half-way cases -// template typename enable::type llround(T arg) { return functions::llround(arg); } - inline long long llround(half arg) { return functions::llround(arg); } - inline long long llround(expr arg) { return functions::llround(arg); } - - /// Nearest integer using half's internal rounding mode. - /// \param arg half expression to round - /// \return nearest integer using default rounding mode -// template typename enable::type llrint(T arg) { return functions::llrint(arg); } - inline long long llrint(half arg) { return functions::llrint(arg); } - inline long long llrint(expr arg) { return functions::llrint(arg); } - #endif - - /// \} - /// \name Floating point manipulation - /// \{ - - /// Decompress floating point number. 
- /// \param arg number to decompress - /// \param exp address to store exponent at - /// \return significant in range [0.5, 1) -// template typename enable::type frexp(T arg, int *exp) { return functions::frexp(arg, exp); } - inline half frexp(half arg, int *exp) { return functions::frexp(arg, exp); } - inline half frexp(expr arg, int *exp) { return functions::frexp(arg, exp); } - - /// Multiply by power of two. - /// \param arg number to modify - /// \param exp power of two to multiply with - /// \return \a arg multiplied by 2 raised to \a exp -// template typename enable::type ldexp(T arg, int exp) { return functions::scalbln(arg, exp); } - inline half ldexp(half arg, int exp) { return functions::scalbln(arg, exp); } - inline half ldexp(expr arg, int exp) { return functions::scalbln(arg, exp); } - - /// Extract integer and fractional parts. - /// \param arg number to decompress - /// \param iptr address to store integer part at - /// \return fractional part -// template typename enable::type modf(T arg, half *iptr) { return functions::modf(arg, iptr); } - inline half modf(half arg, half *iptr) { return functions::modf(arg, iptr); } - inline half modf(expr arg, half *iptr) { return functions::modf(arg, iptr); } - - /// Multiply by power of two. - /// \param arg number to modify - /// \param exp power of two to multiply with - /// \return \a arg multiplied by 2 raised to \a exp -// template typename enable::type scalbn(T arg, int exp) { return functions::scalbln(arg, exp); } - inline half scalbn(half arg, int exp) { return functions::scalbln(arg, exp); } - inline half scalbn(expr arg, int exp) { return functions::scalbln(arg, exp); } - - /// Multiply by power of two. - /// \param arg number to modify - /// \param exp power of two to multiply with - /// \return \a arg multiplied by 2 raised to \a exp -// template typename enable::type scalbln(T arg, long exp) { return functions::scalbln(arg, exp); } - inline half scalbln(half arg, long exp) { return functions::scalbln(arg, exp); } - inline half scalbln(expr arg, long exp) { return functions::scalbln(arg, exp); } - - /// Extract exponent. - /// \param arg number to query - /// \return floating point exponent - /// \retval FP_ILOGB0 for zero - /// \retval FP_ILOGBNAN for NaN - /// \retval MAX_INT for infinity -// template typename enable::type ilogb(T arg) { return functions::ilogb(arg); } - inline int ilogb(half arg) { return functions::ilogb(arg); } - inline int ilogb(expr arg) { return functions::ilogb(arg); } - - /// Extract exponent. - /// \param arg number to query - /// \return floating point exponent -// template typename enable::type logb(T arg) { return functions::logb(arg); } - inline half logb(half arg) { return functions::logb(arg); } - inline half logb(expr arg) { return functions::logb(arg); } - - /// Next representable value. - /// \param from value to compute next representable value for - /// \param to direction towards which to compute next value - /// \return next representable value after \a from in direction towards \a to -// template typename enable::type nextafter(T from, U to) { return functions::nextafter(from, to); } - inline half nextafter(half from, half to) { return functions::nextafter(from, to); } - inline half nextafter(half from, expr to) { return functions::nextafter(from, to); } - inline half nextafter(expr from, half to) { return functions::nextafter(from, to); } - inline half nextafter(expr from, expr to) { return functions::nextafter(from, to); } - - /// Next representable value. 
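A small sketch of the frexp/ldexp round trip provided by the wrappers above, assuming the pre-merge half.hpp:

#include <iostream>
#include "triton/external/half.hpp"   // pre-merge path, removed by this patch

int main() {
  using half_float::half;
  half v(12.0f);
  int e = 0;
  half m    = half_float::frexp(v, &e);   // m in [0.5, 1), with v == m * 2^e
  half back = half_float::ldexp(m, e);    // reassemble the original value: 12.0
  std::cout << float(m) << " * 2^" << e << " = " << float(back) << "\n";
  return 0;
}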
- /// \param from value to compute next representable value for - /// \param to direction towards which to compute next value - /// \return next representable value after \a from in direction towards \a to -// template typename enable::type nexttoward(T from, long double to) { return functions::nexttoward(from, to); } - inline half nexttoward(half from, long double to) { return functions::nexttoward(from, to); } - inline half nexttoward(expr from, long double to) { return functions::nexttoward(from, to); } - - /// Take sign. - /// \param x value to change sign for - /// \param y value to take sign from - /// \return value equal to \a x in magnitude and to \a y in sign -// template typename enable::type copysign(T x, U y) { return functions::copysign(x, y); } - inline half copysign(half x, half y) { return functions::copysign(x, y); } - inline half copysign(half x, expr y) { return functions::copysign(x, y); } - inline half copysign(expr x, half y) { return functions::copysign(x, y); } - inline half copysign(expr x, expr y) { return functions::copysign(x, y); } - - /// \} - /// \name Floating point classification - /// \{ - - - /// Classify floating point value. - /// \param arg number to classify - /// \retval FP_ZERO for positive and negative zero - /// \retval FP_SUBNORMAL for subnormal numbers - /// \retval FP_INFINITY for positive and negative infinity - /// \retval FP_NAN for NaNs - /// \retval FP_NORMAL for all other (normal) values -// template typename enable::type fpclassify(T arg) { return functions::fpclassify(arg); } - inline int fpclassify(half arg) { return functions::fpclassify(arg); } - inline int fpclassify(expr arg) { return functions::fpclassify(arg); } - - /// Check if finite number. - /// \param arg number to check - /// \retval true if neither infinity nor NaN - /// \retval false else -// template typename enable::type isfinite(T arg) { return functions::isfinite(arg); } - inline bool isfinite(half arg) { return functions::isfinite(arg); } - inline bool isfinite(expr arg) { return functions::isfinite(arg); } - - /// Check for infinity. - /// \param arg number to check - /// \retval true for positive or negative infinity - /// \retval false else -// template typename enable::type isinf(T arg) { return functions::isinf(arg); } - inline bool isinf(half arg) { return functions::isinf(arg); } - inline bool isinf(expr arg) { return functions::isinf(arg); } - - /// Check for NaN. - /// \param arg number to check - /// \retval true for NaNs - /// \retval false else -// template typename enable::type isnan(T arg) { return functions::isnan(arg); } - inline bool isnan(half arg) { return functions::isnan(arg); } - inline bool isnan(expr arg) { return functions::isnan(arg); } - - /// Check if normal number. - /// \param arg number to check - /// \retval true if normal number - /// \retval false if either subnormal, zero, infinity or NaN -// template typename enable::type isnormal(T arg) { return functions::isnormal(arg); } - inline bool isnormal(half arg) { return functions::isnormal(arg); } - inline bool isnormal(expr arg) { return functions::isnormal(arg); } - - /// Check sign. 
- /// \param arg number to check - /// \retval true for negative number - /// \retval false for positive number -// template typename enable::type signbit(T arg) { return functions::signbit(arg); } - inline bool signbit(half arg) { return functions::signbit(arg); } - inline bool signbit(expr arg) { return functions::signbit(arg); } - - /// \} - /// \name Comparison - /// \{ - - /// Comparison for greater than. - /// \param x first operand - /// \param y second operand - /// \retval true if \a x greater than \a y - /// \retval false else -// template typename enable::type isgreater(T x, U y) { return functions::isgreater(x, y); } - inline bool isgreater(half x, half y) { return functions::isgreater(x, y); } - inline bool isgreater(half x, expr y) { return functions::isgreater(x, y); } - inline bool isgreater(expr x, half y) { return functions::isgreater(x, y); } - inline bool isgreater(expr x, expr y) { return functions::isgreater(x, y); } - - /// Comparison for greater equal. - /// \param x first operand - /// \param y second operand - /// \retval true if \a x greater equal \a y - /// \retval false else -// template typename enable::type isgreaterequal(T x, U y) { return functions::isgreaterequal(x, y); } - inline bool isgreaterequal(half x, half y) { return functions::isgreaterequal(x, y); } - inline bool isgreaterequal(half x, expr y) { return functions::isgreaterequal(x, y); } - inline bool isgreaterequal(expr x, half y) { return functions::isgreaterequal(x, y); } - inline bool isgreaterequal(expr x, expr y) { return functions::isgreaterequal(x, y); } - - /// Comparison for less than. - /// \param x first operand - /// \param y second operand - /// \retval true if \a x less than \a y - /// \retval false else -// template typename enable::type isless(T x, U y) { return functions::isless(x, y); } - inline bool isless(half x, half y) { return functions::isless(x, y); } - inline bool isless(half x, expr y) { return functions::isless(x, y); } - inline bool isless(expr x, half y) { return functions::isless(x, y); } - inline bool isless(expr x, expr y) { return functions::isless(x, y); } - - /// Comparison for less equal. - /// \param x first operand - /// \param y second operand - /// \retval true if \a x less equal \a y - /// \retval false else -// template typename enable::type islessequal(T x, U y) { return functions::islessequal(x, y); } - inline bool islessequal(half x, half y) { return functions::islessequal(x, y); } - inline bool islessequal(half x, expr y) { return functions::islessequal(x, y); } - inline bool islessequal(expr x, half y) { return functions::islessequal(x, y); } - inline bool islessequal(expr x, expr y) { return functions::islessequal(x, y); } - - /// Comarison for less or greater. - /// \param x first operand - /// \param y second operand - /// \retval true if either less or greater - /// \retval false else -// template typename enable::type islessgreater(T x, U y) { return functions::islessgreater(x, y); } - inline bool islessgreater(half x, half y) { return functions::islessgreater(x, y); } - inline bool islessgreater(half x, expr y) { return functions::islessgreater(x, y); } - inline bool islessgreater(expr x, half y) { return functions::islessgreater(x, y); } - inline bool islessgreater(expr x, expr y) { return functions::islessgreater(x, y); } - - /// Check if unordered. 
- /// \param x first operand - /// \param y second operand - /// \retval true if unordered (one or two NaN operands) - /// \retval false else -// template typename enable::type isunordered(T x, U y) { return functions::isunordered(x, y); } - inline bool isunordered(half x, half y) { return functions::isunordered(x, y); } - inline bool isunordered(half x, expr y) { return functions::isunordered(x, y); } - inline bool isunordered(expr x, half y) { return functions::isunordered(x, y); } - inline bool isunordered(expr x, expr y) { return functions::isunordered(x, y); } - - /// \name Casting - /// \{ - - /// Cast to or from half-precision floating point number. - /// This casts between [half](\ref half_float::half) and any built-in arithmetic type. The values are converted - /// directly using the given rounding mode, without any roundtrip over `float` that a `static_cast` would otherwise do. - /// It uses the default rounding mode. - /// - /// Using this cast with neither of the two types being a [half](\ref half_float::half) or with any of the two types - /// not being a built-in arithmetic type (apart from [half](\ref half_float::half), of course) results in a compiler - /// error and casting between [half](\ref half_float::half)s is just a no-op. - /// \tparam T destination type (half or built-in arithmetic type) - /// \tparam U source type (half or built-in arithmetic type) - /// \param arg value to cast - /// \return \a arg converted to destination type - template T half_cast(U arg) { return half_caster::cast(arg); } - - /// Cast to or from half-precision floating point number. - /// This casts between [half](\ref half_float::half) and any built-in arithmetic type. The values are converted - /// directly using the given rounding mode, without any roundtrip over `float` that a `static_cast` would otherwise do. - /// - /// Using this cast with neither of the two types being a [half](\ref half_float::half) or with any of the two types - /// not being a built-in arithmetic type (apart from [half](\ref half_float::half), of course) results in a compiler - /// error and casting between [half](\ref half_float::half)s is just a no-op. - /// \tparam T destination type (half or built-in arithmetic type) - /// \tparam R rounding mode to use. 
- /// \tparam U source type (half or built-in arithmetic type) - /// \param arg value to cast - /// \return \a arg converted to destination type - template T half_cast(U arg) { return half_caster::cast(arg); } - /// \} - } - - using detail::operator==; - using detail::operator!=; - using detail::operator<; - using detail::operator>; - using detail::operator<=; - using detail::operator>=; - using detail::operator+; - using detail::operator-; - using detail::operator*; - using detail::operator/; - using detail::operator<<; - using detail::operator>>; - - using detail::abs; - using detail::fabs; - using detail::fmod; - using detail::remainder; - using detail::remquo; - using detail::fma; - using detail::fmax; - using detail::fmin; - using detail::fdim; - using detail::nanh; - using detail::exp; - using detail::expm1; - using detail::exp2; - using detail::log; - using detail::log10; - using detail::log1p; - using detail::log2; - using detail::sqrt; - using detail::cbrt; - using detail::hypot; - using detail::pow; - using detail::sin; - using detail::cos; - using detail::tan; - using detail::asin; - using detail::acos; - using detail::atan; - using detail::atan2; - using detail::sinh; - using detail::cosh; - using detail::tanh; - using detail::asinh; - using detail::acosh; - using detail::atanh; - using detail::erf; - using detail::erfc; - using detail::lgamma; - using detail::tgamma; - using detail::ceil; - using detail::floor; - using detail::trunc; - using detail::round; - using detail::lround; - using detail::nearbyint; - using detail::rint; - using detail::lrint; -#if HALF_ENABLE_CPP11_LONG_LONG - using detail::llround; - using detail::llrint; -#endif - using detail::frexp; - using detail::ldexp; - using detail::modf; - using detail::scalbn; - using detail::scalbln; - using detail::ilogb; - using detail::logb; - using detail::nextafter; - using detail::nexttoward; - using detail::copysign; - using detail::fpclassify; - using detail::isfinite; - using detail::isinf; - using detail::isnan; - using detail::isnormal; - using detail::signbit; - using detail::isgreater; - using detail::isgreaterequal; - using detail::isless; - using detail::islessequal; - using detail::islessgreater; - using detail::isunordered; - - using detail::half_cast; -} - - -/// Extensions to the C++ standard library. -namespace std -{ - /// Numeric limits for half-precision floats. - /// Because of the underlying single-precision implementation of many operations, it inherits some properties from - /// `std::numeric_limits`. - template<> class numeric_limits : public numeric_limits - { - public: - /// Supports signed values. - static HALF_CONSTEXPR_CONST bool is_signed = true; - - /// Is not exact. - static HALF_CONSTEXPR_CONST bool is_exact = false; - - /// Doesn't provide modulo arithmetic. - static HALF_CONSTEXPR_CONST bool is_modulo = false; - - /// IEEE conformant. - static HALF_CONSTEXPR_CONST bool is_iec559 = true; - - /// Supports infinity. - static HALF_CONSTEXPR_CONST bool has_infinity = true; - - /// Supports quiet NaNs. - static HALF_CONSTEXPR_CONST bool has_quiet_NaN = true; - - /// Supports subnormal values. - static HALF_CONSTEXPR_CONST float_denorm_style has_denorm = denorm_present; - - /// Rounding mode. 
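For reference, a minimal sketch of the half_cast interface documented above (direct conversion with an explicit rounding mode, no float round trip), assuming the pre-merge half.hpp; the literal values are illustrative only:

#include <iostream>
#include <limits>   // std::round_toward_zero
#include "triton/external/half.hpp"   // pre-merge path, removed by this patch

int main() {
  using half_float::half;
  using half_float::half_cast;
  half a = half_cast<half>(3.14159);                          // default rounding mode
  half b = half_cast<half, std::round_toward_zero>(3.14159);  // explicit rounding mode
  int  i = half_cast<int>(a);                                 // half -> built-in arithmetic type
  std::cout << float(a) << " " << float(b) << " " << i << "\n";
  return 0;
}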
- /// Due to the mix of internal single-precision computations (using the rounding mode of the underlying - /// single-precision implementation) with the rounding mode of the single-to-half conversions, the actual rounding - /// mode might be `std::round_indeterminate` if the default half-precision rounding mode doesn't match the - /// single-precision rounding mode. - static HALF_CONSTEXPR_CONST float_round_style round_style = (std::numeric_limits::round_style== - half_float::half::round_style) ? half_float::half::round_style : round_indeterminate; - - /// Significant digits. - static HALF_CONSTEXPR_CONST int digits = 11; - - /// Significant decimal digits. - static HALF_CONSTEXPR_CONST int digits10 = 3; - - /// Required decimal digits to represent all possible values. - static HALF_CONSTEXPR_CONST int max_digits10 = 5; - - /// Number base. - static HALF_CONSTEXPR_CONST int radix = 2; - - /// One more than smallest exponent. - static HALF_CONSTEXPR_CONST int min_exponent = -13; - - /// Smallest normalized representable power of 10. - static HALF_CONSTEXPR_CONST int min_exponent10 = -4; - - /// One more than largest exponent - static HALF_CONSTEXPR_CONST int max_exponent = 16; - - /// Largest finitely representable power of 10. - static HALF_CONSTEXPR_CONST int max_exponent10 = 4; - - /// Smallest positive normal value. - static HALF_CONSTEXPR half_float::half min() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x0400); } - - /// Smallest finite value. - static HALF_CONSTEXPR half_float::half lowest() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0xFBFF); } - - /// Largest finite value. - static HALF_CONSTEXPR half_float::half max() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x7BFF); } - - /// Difference between one and next representable value. - static HALF_CONSTEXPR half_float::half epsilon() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x1400); } - - /// Maximum rounding error. - static HALF_CONSTEXPR half_float::half round_error() HALF_NOTHROW - { return half_float::half(half_float::detail::binary, (round_style==std::round_to_nearest) ? 0x3800 : 0x3C00); } - - /// Positive infinity. - static HALF_CONSTEXPR half_float::half infinity() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x7C00); } - - /// Quiet NaN. - static HALF_CONSTEXPR half_float::half quiet_NaN() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x7FFF); } - - /// Signalling NaN. - static HALF_CONSTEXPR half_float::half signaling_NaN() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x7DFF); } - - /// Smallest positive subnormal value. - static HALF_CONSTEXPR half_float::half denorm_min() HALF_NOTHROW { return half_float::half(half_float::detail::binary, 0x0001); } - }; - -#if HALF_ENABLE_CPP11_HASH - /// Hash function for half-precision floats. - /// This is only defined if C++11 `std::hash` is supported and enabled. - template<> struct hash //: unary_function - { - /// Type of function argument. - typedef half_float::half argument_type; - - /// Function return type. - typedef size_t result_type; - - /// Compute hash function. 
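A small sketch querying the std::numeric_limits specialization above, assuming the pre-merge half.hpp; the commented values follow the constants defined above:

#include <iostream>
#include <limits>
#include "triton/external/half.hpp"   // pre-merge path, removed by this patch

int main() {
  using half_float::half;
  using lim = std::numeric_limits<half>;
  std::cout << "digits     = " << lim::digits              << "\n"   // 11
            << "max        = " << float(lim::max())        << "\n"   // 65504
            << "epsilon    = " << float(lim::epsilon())    << "\n"   // 2^-10
            << "denorm_min = " << float(lim::denorm_min()) << "\n";  // 2^-24
  return 0;
}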
- /// \param arg half to hash - /// \return hash value - result_type operator()(argument_type arg) const - { return hash()(static_cast(arg.data_)&-(arg.data_!=0x8000)); } - }; -#endif -} - - -#undef HALF_CONSTEXPR -#undef HALF_CONSTEXPR_CONST -#undef HALF_NOEXCEPT -#undef HALF_NOTHROW -#ifdef HALF_POP_WARNINGS - #pragma warning(pop) - #undef HALF_POP_WARNINGS -#endif - -#endif diff --git a/include/triton/external/hip.h b/include/triton/external/hip.h deleted file mode 100644 index a099f857b7d5..000000000000 --- a/include/triton/external/hip.h +++ /dev/null @@ -1,288 +0,0 @@ -/* - * @brief hipError_t - * @enum - * @ingroup Enumerations - */ -// Developer note - when updating these, update the hipErrorName and hipErrorString functions in -// NVCC and HCC paths Also update the hipCUDAErrorTohipError function in NVCC path. - -// Ignoring error-code return values from hip APIs is discouraged. On C++17, -// we can make that yield a warning - -/* - * @brief hipError_t - * @enum - * @ingroup Enumerations - */ -// Developer note - when updating these, update the hipErrorName and hipErrorString functions in -// NVCC and HCC paths Also update the hipCUDAErrorTohipError function in NVCC path. - -#include - -typedef enum hipError_t { - hipSuccess = 0, ///< Successful completion. - hipErrorInvalidValue = 1, ///< One or more of the parameters passed to the API call is NULL - ///< or not in an acceptable range. - hipErrorOutOfMemory = 2, - // Deprecated - hipErrorMemoryAllocation = 2, ///< Memory allocation error. - hipErrorNotInitialized = 3, - // Deprecated - hipErrorInitializationError = 3, - hipErrorDeinitialized = 4, - hipErrorProfilerDisabled = 5, - hipErrorProfilerNotInitialized = 6, - hipErrorProfilerAlreadyStarted = 7, - hipErrorProfilerAlreadyStopped = 8, - hipErrorInvalidConfiguration = 9, - hipErrorInvalidPitchValue = 12, - hipErrorInvalidSymbol = 13, - hipErrorInvalidDevicePointer = 17, ///< Invalid Device Pointer - hipErrorInvalidMemcpyDirection = 21, ///< Invalid memory copy direction - hipErrorInsufficientDriver = 35, - hipErrorMissingConfiguration = 52, - hipErrorPriorLaunchFailure = 53, - hipErrorInvalidDeviceFunction = 98, - hipErrorNoDevice = 100, ///< Call to hipGetDeviceCount returned 0 devices - hipErrorInvalidDevice = 101, ///< DeviceID must be in range 0...#compute-devices. - hipErrorInvalidImage = 200, - hipErrorInvalidContext = 201, ///< Produced when input context is invalid. - hipErrorContextAlreadyCurrent = 202, - hipErrorMapFailed = 205, - // Deprecated - hipErrorMapBufferObjectFailed = 205, ///< Produced when the IPC memory attach failed from ROCr. - hipErrorUnmapFailed = 206, - hipErrorArrayIsMapped = 207, - hipErrorAlreadyMapped = 208, - hipErrorNoBinaryForGpu = 209, - hipErrorAlreadyAcquired = 210, - hipErrorNotMapped = 211, - hipErrorNotMappedAsArray = 212, - hipErrorNotMappedAsPointer = 213, - hipErrorECCNotCorrectable = 214, - hipErrorUnsupportedLimit = 215, - hipErrorContextAlreadyInUse = 216, - hipErrorPeerAccessUnsupported = 217, - hipErrorInvalidKernelFile = 218, ///< In CUDA DRV, it is CUDA_ERROR_INVALID_PTX - hipErrorInvalidGraphicsContext = 219, - hipErrorInvalidSource = 300, - hipErrorFileNotFound = 301, - hipErrorSharedObjectSymbolNotFound = 302, - hipErrorSharedObjectInitFailed = 303, - hipErrorOperatingSystem = 304, - hipErrorInvalidHandle = 400, - // Deprecated - hipErrorInvalidResourceHandle = 400, ///< Resource handle (hipEvent_t or hipStream_t) invalid. 
- hipErrorNotFound = 500, - hipErrorNotReady = 600, ///< Indicates that asynchronous operations enqueued earlier are not - ///< ready. This is not actually an error, but is used to distinguish - ///< from hipSuccess (which indicates completion). APIs that return - ///< this error include hipEventQuery and hipStreamQuery. - hipErrorIllegalAddress = 700, - hipErrorLaunchOutOfResources = 701, ///< Out of resources error. - hipErrorLaunchTimeOut = 702, - hipErrorPeerAccessAlreadyEnabled = - 704, ///< Peer access was already enabled from the current device. - hipErrorPeerAccessNotEnabled = - 705, ///< Peer access was never enabled from the current device. - hipErrorSetOnActiveProcess = 708, - hipErrorAssert = 710, ///< Produced when the kernel calls assert. - hipErrorHostMemoryAlreadyRegistered = - 712, ///< Produced when trying to lock a page-locked memory. - hipErrorHostMemoryNotRegistered = - 713, ///< Produced when trying to unlock a non-page-locked memory. - hipErrorLaunchFailure = - 719, ///< An exception occurred on the device while executing a kernel. - hipErrorCooperativeLaunchTooLarge = - 720, ///< This error indicates that the number of blocks launched per grid for a kernel - ///< that was launched via cooperative launch APIs exceeds the maximum number of - ///< allowed blocks for the current device - hipErrorNotSupported = 801, ///< Produced when the hip API is not supported/implemented - hipErrorUnknown = 999, //< Unknown error. - // HSA Runtime Error Codes start here. - hipErrorRuntimeMemory = 1052, ///< HSA runtime memory call returned error. Typically not seen - ///< in production systems. - hipErrorRuntimeOther = 1053, ///< HSA runtime call other than memory returned error. Typically - ///< not seen in production systems. - hipErrorTbd ///< Marker that more error codes are needed. -} hipError_t; - - -typedef struct ihipCtx_t* hipCtx_t; - -// Note many APIs also use integer deviceIds as an alternative to the device pointer: -typedef int hipDevice_t; - -typedef enum hipDeviceP2PAttr { - hipDevP2PAttrPerformanceRank = 0, - hipDevP2PAttrAccessSupported, - hipDevP2PAttrNativeAtomicSupported, - hipDevP2PAttrHipArrayAccessSupported -} hipDeviceP2PAttr; - -typedef struct ihipStream_t* hipStream_t; - -#define hipIpcMemLazyEnablePeerAccess 0 - -#define HIP_IPC_HANDLE_SIZE 64 - -typedef struct hipIpcMemHandle_st { - char reserved[HIP_IPC_HANDLE_SIZE]; -} hipIpcMemHandle_t; - -typedef struct hipIpcEventHandle_st { - char reserved[HIP_IPC_HANDLE_SIZE]; -} hipIpcEventHandle_t; - -typedef struct ihipModule_t* hipModule_t; - -typedef struct ihipModuleSymbol_t* hipFunction_t; - -typedef struct hipFuncAttributes { - int binaryVersion; - int cacheModeCA; - size_t constSizeBytes; - size_t localSizeBytes; - int maxDynamicSharedSizeBytes; - int maxThreadsPerBlock; - int numRegs; - int preferredShmemCarveout; - int ptxVersion; - size_t sharedSizeBytes; -} hipFuncAttributes; - -typedef struct ihipEvent_t* hipEvent_t; - -/* - * @brief hipDeviceAttribute_t - * @enum - * @ingroup Enumerations - */ -typedef enum hipDeviceAttribute_t { - hipDeviceAttributeMaxThreadsPerBlock, ///< Maximum number of threads per block. - hipDeviceAttributeMaxBlockDimX, ///< Maximum x-dimension of a block. - hipDeviceAttributeMaxBlockDimY, ///< Maximum y-dimension of a block. - hipDeviceAttributeMaxBlockDimZ, ///< Maximum z-dimension of a block. - hipDeviceAttributeMaxGridDimX, ///< Maximum x-dimension of a grid. - hipDeviceAttributeMaxGridDimY, ///< Maximum y-dimension of a grid. 
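The comment in this header discourages ignoring hipError_t return values; a minimal sketch of that checking discipline, using only the shim declarations from the removed header (the hip_check helper is hypothetical and not part of the codebase):

#include <cstdio>
#include <cstdlib>
#include "triton/external/hip.h"   // pre-merge path, removed by this patch

// Hypothetical helper: abort with the numeric error code when a call fails.
// (The shim above declares no hipGetErrorString, so only the code is reported.)
static void hip_check(hipError_t err, const char *what) {
  if (err != hipSuccess) {
    std::fprintf(stderr, "%s failed with hipError_t %d\n", what, static_cast<int>(err));
    std::exit(1);
  }
}

int main() {
  // With a real driver one would pass actual API results here;
  // hipSuccess keeps the sketch self-contained.
  hip_check(hipSuccess, "dummy call");
  return 0;
}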
- hipDeviceAttributeMaxGridDimZ, ///< Maximum z-dimension of a grid. - hipDeviceAttributeMaxSharedMemoryPerBlock, ///< Maximum shared memory available per block in - ///< bytes. - hipDeviceAttributeTotalConstantMemory, ///< Constant memory size in bytes. - hipDeviceAttributeWarpSize, ///< Warp size in threads. - hipDeviceAttributeMaxRegistersPerBlock, ///< Maximum number of 32-bit registers available to a - ///< thread block. This number is shared by all thread - ///< blocks simultaneously resident on a - ///< multiprocessor. - hipDeviceAttributeClockRate, ///< Peak clock frequency in kilohertz. - hipDeviceAttributeMemoryClockRate, ///< Peak memory clock frequency in kilohertz. - hipDeviceAttributeMemoryBusWidth, ///< Global memory bus width in bits. - hipDeviceAttributeMultiprocessorCount, ///< Number of multiprocessors on the device. - hipDeviceAttributeComputeMode, ///< Compute mode that device is currently in. - hipDeviceAttributeL2CacheSize, ///< Size of L2 cache in bytes. 0 if the device doesn't have L2 - ///< cache. - hipDeviceAttributeMaxThreadsPerMultiProcessor, ///< Maximum resident threads per - ///< multiprocessor. - hipDeviceAttributeComputeCapabilityMajor, ///< Major compute capability version number. - hipDeviceAttributeComputeCapabilityMinor, ///< Minor compute capability version number. - hipDeviceAttributeConcurrentKernels, ///< Device can possibly execute multiple kernels - ///< concurrently. - hipDeviceAttributePciBusId, ///< PCI Bus ID. - hipDeviceAttributePciDeviceId, ///< PCI Device ID. - hipDeviceAttributeMaxSharedMemoryPerMultiprocessor, ///< Maximum Shared Memory Per - ///< Multiprocessor. - hipDeviceAttributeIsMultiGpuBoard, ///< Multiple GPU devices. - hipDeviceAttributeIntegrated, ///< iGPU - hipDeviceAttributeCooperativeLaunch, ///< Support cooperative launch - hipDeviceAttributeCooperativeMultiDeviceLaunch, ///< Support cooperative launch on multiple devices - hipDeviceAttributeMaxTexture1DWidth, ///< Maximum number of elements in 1D images - hipDeviceAttributeMaxTexture2DWidth, ///< Maximum dimension width of 2D images in image elements - hipDeviceAttributeMaxTexture2DHeight, ///< Maximum dimension height of 2D images in image elements - hipDeviceAttributeMaxTexture3DWidth, ///< Maximum dimension width of 3D images in image elements - hipDeviceAttributeMaxTexture3DHeight, ///< Maximum dimensions height of 3D images in image elements - hipDeviceAttributeMaxTexture3DDepth, ///< Maximum dimensions depth of 3D images in image elements - - hipDeviceAttributeHdpMemFlushCntl, ///< Address of the HDP_MEM_COHERENCY_FLUSH_CNTL register - hipDeviceAttributeHdpRegFlushCntl, ///< Address of the HDP_REG_COHERENCY_FLUSH_CNTL register - - hipDeviceAttributeMaxPitch, ///< Maximum pitch in bytes allowed by memory copies - hipDeviceAttributeTextureAlignment, /// -#include -#include "value.h" -#include "visitor.h" - -namespace triton{ -namespace ir{ - -class context; -class function; -class instruction; - -/* Basic Block */ -class basic_block: public value{ -public: - // instruction iterator types - typedef std::list inst_list_t; - typedef inst_list_t::iterator iterator; - typedef inst_list_t::const_iterator const_iterator; - typedef inst_list_t::reverse_iterator reverse_iterator; - typedef inst_list_t::const_reverse_iterator const_reverse_iterator; - -private: - // constructors - basic_block(context &ctx, const std::string &name, function *parent, basic_block *next); - -public: - // accessors - function* get_parent() { return parent_; } - context& get_context() { return ctx_; 
} - - // get iterator to first instruction that is not a phi - void replace_phi_uses_with(basic_block* before, basic_block* after); - iterator get_first_non_phi(); - - // get instruction list - inst_list_t &get_inst_list() { return inst_list_; } - const inst_list_t &get_inst_list() const { return inst_list_; } - void erase(instruction *i) { inst_list_.remove(i); } - - // instruction iterator functions - inline iterator begin() { return inst_list_.begin(); } - inline const_iterator begin() const { return inst_list_.begin(); } - inline iterator end () { return inst_list_.end(); } - inline const_iterator end () const { return inst_list_.end(); } - - inline reverse_iterator rbegin() { return inst_list_.rbegin(); } - inline const_reverse_iterator rbegin() const { return inst_list_.rbegin(); } - inline reverse_iterator rend () { return inst_list_.rend(); } - inline const_reverse_iterator rend () const { return inst_list_.rend(); } - - inline size_t size() const { return inst_list_.size(); } - inline bool empty() const { return inst_list_.empty(); } - inline const instruction &front() const { return *inst_list_.front(); } - inline instruction &front() { return *inst_list_.front(); } - inline const instruction &back() const { return *inst_list_.back(); } - inline instruction &back() { return *inst_list_.back(); } - - void append_instruction(ir::instruction* i); - // split - basic_block* split_before(ir::instruction* loc, const std::string& name); - - // predecessors - std::vector get_predecessors() const; - std::vector get_successors() const; - - // factory functions - static basic_block* create(context &ctx, const std::string &name, function *parent, basic_block *next = nullptr); - - void print(std::ostream &os); - - // visitor - void accept(visitor *v) { v->visit_basic_block(this); } - -private: - context &ctx_; - std::string name_; - function *parent_; - std::vector preds_; - std::vector succs_; - inst_list_t inst_list_; -}; - -} -} - -#endif diff --git a/include/triton/ir/builder.h b/include/triton/ir/builder.h deleted file mode 100644 index d94dc4a2ab9b..000000000000 --- a/include/triton/ir/builder.h +++ /dev/null @@ -1,212 +0,0 @@ -#pragma once - -#ifndef _TRITON_IR_BUILDER_H_ -#define _TRITON_IR_BUILDER_H_ - -#include -#include -#include "instructions.h" -#include "basic_block.h" -#include "type.h" - -namespace triton{ -namespace ir{ - -class basic_block; -class value; -class type; -class constant_int; -class instruction; -class context; -class phi_node; - -/* Builder */ -class builder{ -public: - typedef basic_block::iterator iterator; - -public: - // Constructor - builder(context &ctx); - // Getters - // const context& get_context() const { return ctx_; } - context& get_context() { return ctx_; } - - // Setters - void set_insert_point(iterator instr); - void set_insert_point(instruction* i); - void set_insert_point_after(instruction* i); - void set_insert_point(basic_block* block); - basic_block* get_insert_block() { return block_; } - iterator get_insert_point() { return insert_point_;} - // Constants - value *get_int1(bool val); - value *get_int32(uint32_t val); - value *get_int64(uint64_t val); - value *get_float16(float val); - value *get_float32(float val); - value *get_range(int32_t lo, int32_t hi); - // Types - type *get_void_ty(); - type *get_int1_ty(); - type *get_int8_ty(); - type *get_int16_ty(); - type *get_int32_ty(); - type *get_int64_ty(); - type *get_fp8_ty(); - type *get_half_ty(); - type *get_bf16_ty(); - type *get_float_ty(); - type *get_double_ty(); - // Insert - 
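For reference, a sketch of walking a legacy ir::basic_block through the iterator interface shown above, assuming the pre-merge headers; dump_block is illustrative, and the element types follow the pre-merge inst_list_t definition:

#include <iostream>
#include "triton/ir/basic_block.h"   // pre-merge path, removed by this patch

// Hypothetical helper: summarize a legacy IR block obtained elsewhere.
static void dump_block(triton::ir::basic_block *bb) {
  std::cout << "block with " << bb->size() << " instruction(s) and "
            << bb->get_predecessors().size() << " predecessor(s)\n";
  for (triton::ir::instruction *i : bb->get_inst_list())
    (void)i;   // per-instruction processing would go here
}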
template - InstTy* insert(InstTy *inst){ - assert(block_); - block_->get_inst_list().insert(insert_point_, inst); - inst->set_parent(block_); -// for(ir::value* op: inst->ops()) -// op->add_use(inst); - return inst; - } - // terminator instructions - value* create_br(basic_block *dest); - value* create_cond_br(value *cond, basic_block* if_dest, basic_block* else_dest); - value* create_ret_void(); - value* create_ret(value *ret); - // Dequantize instructions - value* create_dequantize(value *src, value *scale, value *shift, type *dest_ty); - // Cast instructions - value* create_bitcast(value *src, type *dest_ty); - value *create_cast(cast_op_t op, value *v, type *dst_ty); - value* create_int_to_ptr(value *src, type *dst_ty); - value* create_ptr_to_int(value *src, type *dst_ty); - value* create_si_to_fp(value *src, type *dst_ty); - value* create_ui_to_fp(value *src, type *dst_ty); - value* create_fp_to_si(value *src, type *dst_ty); - value* create_fp_to_ui(value *src, type *dst_ty); - value* create_fp_ext(value *src, type *dst_ty); - value* create_fp_trunc(value *src, type *dst_ty); - value* create_int_cast(value *src, type *dst_ty, bool is_signed); - value *create_downcast(value *arg); - // Call instruction - value* create_call(function* fn, const std::vector& args); - value* create_launch(function* fn, const std::vector& args, const std::vector& grid, value* num_warps); - // Phi instruction - phi_node* create_phi(type *ty, unsigned num_reserved); - // Binary instructions - value *create_insert_nuwnswb_binop(binary_op_t op, value *lhs, value *rhs, bool has_nuw, bool has_nsw); - value *create_fmul(value *lhs, value *rhs); - value *create_fdiv(value *lhs, value *rhs); - value *create_frem(value *lhs, value *rhs); - value *create_fadd(value *lhs, value *rhs); - value *create_fsub(value *lhs, value *rhs); - value *create_sdiv(value *lhs, value *rhs); - value *create_udiv(value *lhs, value *rhs); - value *create_srem(value *lhs, value *rhs); - value *create_urem(value *lhs, value *rhs); - value *create_mul(value *lhs, value *rhs, bool has_nuw = false, bool has_nsw = false); - value *create_add(value *lhs, value *rhs, bool has_nuw = false, bool has_nsw = false); - value *create_sub(value *lhs, value *rhs, bool has_nuw = false, bool has_nsw = false); - value *create_shl(value *lhs, value *rhs, bool has_nuw = false, bool has_nsw = false); - value *create_lshr(value *lhs, value *rhs, bool has_nuw = false, bool has_nsw = false); - value *create_ashr(value *lhs, value *rhs, bool has_nuw = false, bool has_nsw = false); - // GEP - value *create_gep(value *ptr, const std::vector& idx_list); - // Comparison (int) - value *create_icmp(cmp_pred_t pred, value *lhs, value *rhs); - value *create_icmpSLE(value *lhs, value *rhs); - value *create_icmpSLT(value *lhs, value *rhs); - value *create_icmpSGE(value *lhs, value *rhs); - value *create_icmpSGT(value *lhs, value *rhs); - value *create_icmpULE(value *lhs, value *rhs); - value *create_icmpULT(value *lhs, value *rhs); - value *create_icmpUGE(value *lhs, value *rhs); - value *create_icmpUGT(value *lhs, value *rhs); - value *create_icmpEQ(value *lhs, value *rhs); - value *create_icmpNE(value *lhs, value *rhs); - // Comparison (float) - value *create_fcmp(cmp_pred_t pred, value *lhs, value *rhs); - value *create_fcmpOLT(value *lhs, value *rhs); - value *create_fcmpOGT(value *lhs, value *rhs); - value *create_fcmpOLE(value *lhs, value *rhs); - value *create_fcmpOGE(value *lhs, value *rhs); - value *create_fcmpOEQ(value *lhs, value *rhs); - value 
*create_fcmpONE(value *lhs, value *rhs); - value *create_fcmpULT(value *lhs, value *rhs); - value *create_fcmpUGT(value *lhs, value *rhs); - value *create_fcmpULE(value *lhs, value *rhs); - value *create_fcmpUGE(value *lhs, value *rhs); - value *create_fcmpUEQ(value *lhs, value *rhs); - value *create_fcmpUNE(value *lhs, value *rhs); - // Logical - value *create_and(value *lhs, value *rhs); - value *create_xor(value *lhs, value *rhs); - value *create_or(value *lhs, value *rhs); - // Input/Output - value *create_load(value *arg, load_inst::CACHE_MODIFIER cache, load_inst::EVICTION_POLICY eviction, bool is_volatile); - value *create_store(value *ptr, value *val, store_inst::EVICTION_POLICY eviction); - value *create_masked_load(value *arg, value *mask, value *false_value, load_inst::CACHE_MODIFIER cache, load_inst::EVICTION_POLICY eviction, bool is_volatile); - value *create_masked_store(value *ptr, value *val, value *mask, store_inst::EVICTION_POLICY eviction); - // Struct instructions - value *create_insert_value(value* val, value *elt, size_t idx); - value *create_extract_value(value* val, size_t idx); - // Block instruction - value *create_splat(value *arg, const type::block_shapes_t &shapes); - value *create_reshape(value *arg, const type::block_shapes_t &shapes); - value *create_cat(value *lhs, value *rhs); - value *create_broadcast(value *arg, const type::block_shapes_t &shapes); - // Atomic instruction - value *create_atomic_cas(value *ptr, value *cmp, value *val); - value *create_atomic_rmw(atomic_rmw_op_t op, value *ptr, value *val, value *msk); - value *create_atomic_max(value *ptr, value *val, value *msk); - value *create_atomic_umax(value *ptr, value *val, value *msk); - value *create_atomic_min(value *ptr, value *val, value *msk); - value *create_atomic_umin(value *ptr, value *val, value *msk); - value *create_atomic_fadd(value *ptr, value *val, value *msk); - value *create_atomic_add(value *ptr, value *val, value *msk); - value *create_atomic_and(value *ptr, value *val, value *msk); - value *create_atomic_or(value *ptr, value *val, value *msk); - value *create_atomic_xor(value *ptr, value *val, value *msk); - value *create_atomic_xchg(value *ptr, value *val, value *msk); - // Utilities - value *create_clock(); - value *create_globaltimer(); - // Extern instruction - value *create_extern_elementwise(const std::string &lib_name, - const std::string &lib_path, - const std::string &symbol_name, - const std::vector &args, - type *ret_ty); - // Built-in instruction - value *create_get_program_id(unsigned axis); - value *create_get_num_programs(unsigned axis); - value *create_exp(value* arg); - value *create_cos(value* arg); - value *create_sin(value* arg); - value *create_log(value* arg); - value *create_dot(value *A, value *B, value *C, bool trans_a, bool trans_b, bool allow_tf32); - value *create_trans(value *A, const std::vector &perm = {}); - value *create_sqrt(value *A); - value *create_reduce(value *A, reduce_inst::op_t op, unsigned axis); - value *create_select(value *pred, value *if_value, value *else_value); - // Intrinsics - // These have no place in the IR, and hopefully they can be removed at some point - value *create_umulhi(value* lhs, value* rhs); - value *create_copy_to_shared(value *arg); - value *create_masked_load_async(value *arg, value *mask, value *false_value, load_inst::CACHE_MODIFIER cache, load_inst::EVICTION_POLICY); - value *create_copy_from_shared(value *arg); - value *create_barrier(const std::string &name = ""); - value *create_async_wait(int N); - 
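For reference, a sketch of how the legacy ir::builder methods listed above were combined to emit instructions into a block, assuming the pre-merge headers; emit_scaled_add and its value* operands are hypothetical:

#include "triton/ir/builder.h"       // pre-merge paths, removed by this patch
#include "triton/ir/basic_block.h"
#include "triton/ir/context.h"

// Hypothetical helper: append `2 * x + y` at the end of an existing block.
static void emit_scaled_add(triton::ir::context &ctx,
                            triton::ir::basic_block *bb,
                            triton::ir::value *x,
                            triton::ir::value *y) {
  triton::ir::builder b(ctx);
  b.set_insert_point(bb);                            // append at the end of bb
  triton::ir::value *two = b.get_float32(2.0f);      // fp32 constant
  triton::ir::value *t   = b.create_fmul(two, x);    // 2 * x
  triton::ir::value *r   = b.create_fadd(t, y);      // 2 * x + y
  (void)r;
}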
value *create_prefetch_s(value *arg, int inc); - -private: - context &ctx_; - basic_block *block_; - iterator insert_point_; -}; - - -} -} - -#endif diff --git a/include/triton/ir/constant.h b/include/triton/ir/constant.h deleted file mode 100644 index 671d5e5f079e..000000000000 --- a/include/triton/ir/constant.h +++ /dev/null @@ -1,113 +0,0 @@ -#pragma once - -#ifndef _TRITON_IR_CONSTANT_H_ -#define _TRITON_IR_CONSTANT_H_ - -#include "enums.h" -#include "value.h" -#include -#include "visitor.h" - -namespace triton{ -namespace ir{ - -class type; -class context; - -/* Constant */ -class constant: public user{ -protected: - using user::user; - -public: - static constant* get_all_ones_value(type *ty); - static constant* get_null_value(type *ty); - virtual std::string repr() const = 0; -}; - -/* Undef value */ -class undef_value: public constant{ -private: - undef_value(type *ty); - -public: - static undef_value* get(type* ty); - std::string repr() const { return "undef"; } - void accept(visitor* vst) { vst->visit_undef_value(this); } -}; - - -/* Constant int */ -class constant_int: public constant{ -protected: - constant_int(type *ty, uint64_t value); - -public: - virtual uint64_t get_value() const { return value_; } - static constant_int *get(type *ty, uint64_t value); - std::string repr() const { return std::to_string(value_); } - void accept(visitor* vst) { vst->visit_constant_int(this); } - -protected: - uint64_t value_; -}; - -/* Constant fp */ -class constant_fp: public constant{ - constant_fp(type *ty, double value); - -public: - double get_value() { return value_; } - static constant* get_negative_zero(type *ty); - static constant* get_zero_value_for_negation(type *ty); - static constant* get(context &ctx, double v); - static constant* get(type *ty, double v); - std::string repr() const { return std::to_string(value_); } - void accept(visitor* vst) { vst->visit_constant_fp(this); } - -private: - double value_; -}; - - -/* Global Value */ -class global_value: public constant { -public: - enum linkage_types_t { - external - }; - -public: - global_value(type *ty, unsigned num_ops, - linkage_types_t linkage, const std::string &name, - unsigned addr_space); - std::string repr() const { return get_name(); } - -private: - linkage_types_t linkage_; -}; - -/* global object */ -class global_object: public global_value { -public: - global_object(type *ty, unsigned num_ops, - linkage_types_t linkage, const std::string &name, - unsigned addr_space = 0); - std::string repr() const { return get_name(); } -}; - -/* global variable */ -class alloc_const: public global_object { -public: - alloc_const(type *ty, constant_int *size, - const std::string &name = ""); - std::string repr() const { return get_name(); } - void accept(visitor* vst) { vst->visit_alloc_const(this); } - - -}; - -} -} - -#endif diff --git a/include/triton/ir/context.h b/include/triton/ir/context.h deleted file mode 100644 index d824c98b6318..000000000000 --- a/include/triton/ir/context.h +++ /dev/null @@ -1,29 +0,0 @@ -#pragma once - -#ifndef _TRITON_IR_CONTEXT_H_ -#define _TRITON_IR_CONTEXT_H_ - -#include -#include "triton/ir/type.h" - -namespace triton{ -namespace ir{ - -class type; -class context_impl; - -/* Context */ -class context { -public: - context(); - context(const context&) = delete; - context& operator=(const context&) = delete; - -public: - std::shared_ptr p_impl; -}; - -} -} - -#endif diff --git a/include/triton/ir/context_impl.h b/include/triton/ir/context_impl.h deleted file mode 100644 index 
f2d956a3030e..000000000000 --- a/include/triton/ir/context_impl.h +++ /dev/null @@ -1,47 +0,0 @@ -#pragma once - -#ifndef _TRITON_IR_CONTEXT_IMPL_H_ -#define _TRITON_IR_CONTEXT_IMPL_H_ - -#include "triton/ir/type.h" -#include "triton/ir/constant.h" -#include -#include - -namespace triton{ -namespace ir{ - -class context; - -/* Context impl */ -class context_impl { -public: - // constructors - context_impl(context &ctx); - -public: - // non-numeric types - type void_ty, label_ty; - // floating point types - type fp8_ty, fp16_ty, bf16_ty, fp32_ty, fp64_ty; - // integer types - integer_type int1_ty, int8_ty, int16_ty, int32_ty, int64_ty, int128_ty; - // Pointer types - std::map, std::unique_ptr> ptr_tys; - // Block types - std::map, std::unique_ptr> block_tys; - // Struct types - std::map struct_tys; - // Int constants - std::map, std::unique_ptr> int_constants_; - // Float constants - std::map, std::unique_ptr> fp_constants_; - // undef values - std::map> uv_constants_; - -}; - -} -} - -#endif diff --git a/include/triton/ir/enums.h b/include/triton/ir/enums.h deleted file mode 100644 index 0ecdb409deb0..000000000000 --- a/include/triton/ir/enums.h +++ /dev/null @@ -1,187 +0,0 @@ -#pragma once - -#ifndef _TRITON_IR_ENUMS_H_ -#define _TRITON_IR_ENUMS_H_ - -namespace triton{ -namespace ir{ - - -enum binary_op_t: unsigned int{ - Add, - FAdd, - Sub, - FSub, - Mul, - FMul, - UDiv, - SDiv, - FDiv, - URem, - SRem, - FRem, - Shl, - LShr, - AShr, - And, - Or, - Xor -}; - -enum class atomic_rmw_op_t: unsigned int{ - And, - Or, - Xor, - Add, - Max, - Min, - UMax, - UMin, - FAdd, - Xchg, -}; - -enum cast_op_t: unsigned int { - Trunc, - ZExt, - SExt, - FPTrunc, - FPExt, - UIToFP, - SIToFP, - FPToUI, - FPToSI, - PtrToInt, - IntToPtr, - BitCast, - AddrSpaceCast -}; - -enum cmp_pred_t: unsigned int { - FIRST_FCMP_PREDICATE, - FCMP_FALSE, - FCMP_OEQ, - FCMP_OGT, - FCMP_OGE, - FCMP_OLT, - FCMP_OLE, - FCMP_ONE, - FCMP_ORD, - FCMP_UNO, - FCMP_UEQ, - FCMP_UGT, - FCMP_UGE, - FCMP_ULT, - FCMP_ULE, - FCMP_UNE, - FCMP_TRUE, - LAST_FCMP_PREDICATE, - FIRST_ICMP_PREDICATE, - ICMP_EQ, - ICMP_NE, - ICMP_UGT, - ICMP_UGE, - ICMP_ULT, - ICMP_ULE, - ICMP_SGT, - ICMP_SGE, - ICMP_SLT, - ICMP_SLE, - LAST_ICMP_PREDICATE -}; - -enum value_id_t: unsigned { - /* ------------ * - INSTRUCTIONS - * ------------ */ - INST_BEGIN, - // call - INST_CALL, - INST_LAUNCH, - // phi - INST_PHI, - // arithmetic - INST_BINOP, - INST_GETELEMENTPTR, - INST_SELECT, - INST_SQRT, - // cmp - INST_ICMP, - INST_FCMP, - // dequantize - INST_DEQUANTIZE, - // cast - INST_CAST_TRUNC, - INST_CAST_ZEXT, - INST_CAST_SEXT, - INST_CAST_FP_TRUNC, - INST_CAST_FP_EXT, - INST_CAST_UI_TO_FP, - INST_CAST_SI_TO_FP, - INST_CAST_FP_TO_UI, - INST_CAST_FP_TO_SI, - INST_CAST_PTR_TO_INT, - INST_CAST_INT_TO_PTR, - INST_CAST_BIT_CAST, - INST_CAST_ADDR_SPACE_CAST, - // terminators - INST_RETURN, - INST_COND_BRANCH, - INST_UNCOND_BRANCH, - // io - INST_UNMASKED_LOAD, - INST_MASKED_LOAD, - INST_MASKED_LOAD_ASYNC, - INST_UNMASKED_STORE, - INST_MASKED_STORE, - // struct - INST_EXTRACT_VALUE, - INST_INSERT_VALUE, - // retile - INST_RESHAPE, - INST_SPLAT, - INST_CAT, - INST_BROADCAST, - INST_DOWNCAST, - // builtin - INST_GET_PROGRAM_ID, - INST_GET_NUM_PROGRAMS, - // atomics - INST_ATOMIC_CAS, - INST_ATOMIC_EXCH, - INST_ATOMIC_RMW, - // math - INST_UMULHI, - INST_EXP, - INST_COS, - INST_SIN, - INST_LOG, - // extern - INST_EXTERN_ELEMENTWISE, - // array arithmetic - INST_TRANS, - INST_REDUCE, - INST_DOT, - // intrinsics - INST_COPY_TO_SHARED, - INST_COPY_FROM_SHARED, - INST_CVT_LAYOUT, 
- INST_CVT_SCANLINE, - INST_DECOALESCE, - INST_RECOALESCE, - INST_BARRIER, - INST_ASYNC_WAIT, - INST_MAKE_RANGE_DYN, - INST_MAKE_RANGE_STA, - INST_MAKE_RANGE, - INST_PREFETCH_S, - INST_GLOBALTIMER, - INST_CLOCK, -}; - - - -} -} - -#endif diff --git a/include/triton/ir/function.h b/include/triton/ir/function.h deleted file mode 100644 index 61ec2a6ae518..000000000000 --- a/include/triton/ir/function.h +++ /dev/null @@ -1,145 +0,0 @@ -#pragma once - -#ifndef _TRITON_IR_FUNCTION_H_ -#define _TRITON_IR_FUNCTION_H_ - -#include -#include -#include "value.h" -#include "constant.h" - -namespace triton{ -namespace ir{ - -class function; -class function_type; -class module; -class basic_block; - -/* Argument */ -class argument: public value{ - argument(type *ty, const std::string &name, function *parent, unsigned arg_no); - -public: - static argument* create(type *ty, const std::string &name, - function *parent = nullptr, unsigned arg_no = 0); - function* get_parent() const; - unsigned get_arg_no() const; - - void accept(visitor *v); - -private: - function *parent_; - unsigned arg_no_; -}; - -/* Attribute */ -enum attribute_kind_t { - readonly = 0, - writeonly, - noalias, - aligned, - multiple_of, - retune, - not_implemented -}; - -class attribute { -public: - attribute(attribute_kind_t kind, unsigned value = 0): - kind_(kind), value_(value){} - - bool operator<(const attribute& other) const { - return std::make_pair(kind_, value_) < std::make_pair(other.kind_, other.value_); - } - - attribute_kind_t get_kind() const { - return kind_; - } - - unsigned get_value() const { - return value_; - } - - bool is_llvm_attr() const { - return kind_ != multiple_of; - } - - std::string repr() const { - switch(kind_){ - case readonly: return ".readonly"; - case writeonly: return ".writeonly"; - case noalias: return ".noalias"; - case aligned: return ".aligned(" + std::to_string(value_) + ")"; - case multiple_of: return ".multipleof(" + std::to_string(value_) + ")"; - case retune: return ".retunr"; - default: break; - } - assert(false); - return ""; - } - -private: - attribute_kind_t kind_; - unsigned value_; -}; - -/* Function */ -class function: public global_object{ - typedef std::vector args_t; - typedef args_t::iterator arg_iterator; - typedef args_t::const_iterator const_arg_iterator; - - typedef std::vector blocks_t; - typedef blocks_t::iterator block_iterator; - typedef blocks_t::const_iterator const_block_iterator; - - typedef std::map> attr_map_t; - -private: - function(function_type *ty, linkage_types_t linkage, - const std::string &name = "", module *parent = nullptr); - -public: - // accessors - const args_t &args() const { return args_; } - function_type* get_fn_type() { return fn_ty_; } - const function_type* get_fn_type() const { return fn_ty_; } - module *get_parent() { return parent_; } - const module *get_parent() const { return parent_; } - - // factory methods - static function *create(function_type *ty, linkage_types_t linkage, - const std::string &name, module *mod); - // blocks - blocks_t &blocks() { return blocks_; } - const blocks_t &blocks() const { return blocks_; } - void insert_block(basic_block* block, basic_block *next = nullptr); - - // attributes - void add_attr(unsigned arg_id, attribute attr) { attrs_[arg_id].insert(attr); } - const attr_map_t &attrs() { return attrs_; } - bool has_attr(unsigned arg_id) const { return attrs_.find(arg_id) != attrs_.end(); } - std::set get_attributes(const argument* arg) { return attrs_[arg->get_arg_no() + 1]; } - void set_is_kernel(bool new_val) 
{ is_kernel_ = new_val; } - bool get_is_kernel() { return is_kernel_; } - - void print(std::ostream &os); - - // visitor - void accept(visitor *v) { v->visit_function(this); } - -private: - module *parent_; - bool init_; - function_type *fn_ty_; - args_t args_; - blocks_t blocks_; - attr_map_t attrs_; - bool is_kernel_; -}; - -} -} - -#endif diff --git a/include/triton/ir/instructions.h b/include/triton/ir/instructions.h deleted file mode 100644 index 9c386737e5cd..000000000000 --- a/include/triton/ir/instructions.h +++ /dev/null @@ -1,1147 +0,0 @@ -#pragma once - -#ifndef _TRITON_IR_INSTRUCTIONS_H_ -#define _TRITON_IR_INSTRUCTIONS_H_ - -#include -#include -#include "triton/ir/enums.h" -#include "triton/ir/constant.h" -#include "triton/ir/value.h" -#include "triton/ir/type.h" -#include "triton/ir/metadata.h" -#include "triton/ir/visitor.h" - -#define _TRITON_DEFINE_CLONE(name) \ - ir::instruction* clone_impl() const { return new name(*this); } - -#define _TRITON_DEFINE_ACCEPT(name) \ - void accept(visitor* v) { v->visit_ ## name (this); } - -namespace triton{ -namespace ir{ - -class constant_int; -class constant; -class make_range; -class basic_block; -class context; -class visitor; - -//===----------------------------------------------------------------------===// -// instruction classes -//===----------------------------------------------------------------------===// - -class result_reference; - - -class instruction: public user{ -public: - virtual std::string repr_impl() const = 0; - -private: - virtual ir::instruction* clone_impl() const = 0; - -protected: - // constructors - instruction(type *ty, value_id_t ity, unsigned num_ops, - const std::string &name = "", instruction *next = nullptr); - -public: - // parent - void set_parent(basic_block *block) { parent_ = block; } - const basic_block *get_parent() const { return parent_; } - basic_block *get_parent() { return parent_; } - void erase_from_parent(); - // helpers - bool has_tile_result_or_op(); - // repr - std::string repr() const { return repr_impl(); } - // metadata - void set_metadata(ir::metadata::kind_t kind, - std::vector value) { metadatas_[kind] = value;} - std::vector get_metadata(ir::metadata::kind_t kind) { return metadatas_[kind];} - // cloning - ir::instruction* clone() { - ir::instruction* res = clone_impl(); -// for(auto it = op_begin(); it != op_end(); it++) -// (*it)->add_use(res); - res->parent_ = nullptr; - res->users_.clear(); - return res; - } - // instruction id - value_id_t get_id() const { return id_; } - - void print(std::ostream &os); - -private: - basic_block *parent_; - std::map> metadatas_; - value_id_t id_; -}; - -//===----------------------------------------------------------------------===// -// call_inst classes -//===----------------------------------------------------------------------===// - -class call_inst: public instruction { -private: - std::string repr_impl() const; - call_inst(ir::function* fn, const std::vector& values, const std::string& name, instruction* next); - -public: - static call_inst* create(ir::function* fn, const std::vector& values, const std::string &name = "", instruction *next = nullptr); - ir::function* get_fn() { return fn_; } - - _TRITON_DEFINE_CLONE(call_inst) - _TRITON_DEFINE_ACCEPT(call_inst) - -private: - ir::function* fn_; -}; - -class launch_inst: public instruction { -private: - std::string repr_impl() const { return "launch"; } - launch_inst(ir::function* fn, const std::vector& values, const std::vector& grid, ir::value* num_warps, - const std::string &name = 
"", instruction *next = nullptr); - -public: - static launch_inst* create(ir::function* fn, const std::vector& values, const std::vector& grid, ir::value* num_warps, - const std::string& name = "", instruction* next = nullptr); - - ir::function* get_fn(); - std::vector get_values(); - std::vector get_grid(); - ir::value* get_num_warps(); - - - _TRITON_DEFINE_CLONE(launch_inst) - _TRITON_DEFINE_ACCEPT(launch_inst) - -private: - unsigned val_begin; - unsigned val_end; - unsigned grid_begin; - unsigned grid_end; -}; - -//===----------------------------------------------------------------------===// -// phi_node classes -//===----------------------------------------------------------------------===// - -class phi_node: public instruction { -private: - phi_node(type *ty, unsigned num_reserved, const std::string &name, instruction *next); - std::string repr_impl() const { return "phi"; } - -public: - void set_incoming_value(unsigned i, value *v); - void set_incoming_block(unsigned i, basic_block *block); - value *get_value_for_block(basic_block *block); - value *get_incoming_value(unsigned i) { return get_operand(i); } - basic_block *get_incoming_block(unsigned i) { return blocks_[i]; } - unsigned get_num_incoming() { return get_num_operands(); } - void add_incoming(value *v, basic_block *block); - - // Type - void set_type(type *ty) { ty_ = ty; } - - // Factory methods - static phi_node* create(type *ty, unsigned num_reserved, const std::string &name = "", instruction *next = nullptr); - - _TRITON_DEFINE_CLONE(phi_node) - _TRITON_DEFINE_ACCEPT(phi_node) - -private: - unsigned num_reserved_; - std::vector blocks_; -}; - -//===----------------------------------------------------------------------===// -// binary_operator classes -//===----------------------------------------------------------------------===// - -class binary_operator: public instruction { -public: - typedef binary_op_t op_t; - -private: - std::string repr_impl() const; - -protected: - // Constructors - binary_operator(binary_op_t op, value *lhs, value *rhs, type *ty, const std::string &name, instruction *next); - -public: - // Get operand - binary_op_t get_op() const { return op_; } - - // Bool - bool is_terminator() const; - bool is_binary_op() const; - bool is_int_div_rem() const; - bool is_shift() const; - bool is_cast() const; - bool is_int_mult() const; - bool is_int_add_sub() const; - bool is_int_div() const; - bool is_int_rem() const; - bool is_shl() const; - bool is_shr() const; - - // Approx - void set_fdiv_ieee_rounding(bool rnd) { fdiv_ieee_rnd_ = rnd; } - bool get_fdiv_ieee_rounding() { return fdiv_ieee_rnd_; } - - // Wraps - void set_has_no_unsigned_wrap(bool b = true) { has_no_unsigned_wrap_ = b; } - void set_has_no_signed_wrap(bool b = true) { has_no_signed_wrap_ = b; } - - // Factory methods - static binary_operator *create(binary_op_t op, value *lhs, value *rhs, - const std::string &name = "", instruction *next = nullptr); -// static binary_operator *create_fneg(value *arg, const std::string &name = "", instruction *next = nullptr); -// static binary_operator *create_neg(value *arg, const std::string &name = "", instruction *next = nullptr); -// static binary_operator *create_not(value *arg, const std::string &name = "", instruction *next = nullptr); - - _TRITON_DEFINE_CLONE(binary_operator) - _TRITON_DEFINE_ACCEPT(binary_operator) - -public: - binary_op_t op_; - bool has_no_unsigned_wrap_; - bool has_no_signed_wrap_; - - bool fdiv_ieee_rnd_; -}; - - 
-//===----------------------------------------------------------------------===// -// cmp_inst classes -//===----------------------------------------------------------------------===// - -class cmp_inst: public instruction{ -public: - typedef cmp_pred_t pred_t; - -private: - std::string repr_impl() const; - -protected: - cmp_inst(type *ty, value_id_t id, cmp_pred_t pred, - value *lhs, value *rhs, const std::string &name, instruction *next); - static bool is_fp_predicate(cmp_pred_t pred); - static bool is_int_predicate(cmp_pred_t pred); - static type* make_cmp_result_type(type *ty); - -public: - cmp_pred_t get_pred() const { return pred_; } - -private: - cmp_pred_t pred_; -}; - -class icmp_inst: public cmp_inst { - icmp_inst(type *ty, cmp_pred_t pred, - value *lhs, value *rhs, const std::string &name, instruction *next); - -public: - static icmp_inst* create(cmp_pred_t pred, value *lhs, value *rhs, - const std::string &name = "", instruction *next = nullptr); - _TRITON_DEFINE_CLONE(icmp_inst) - _TRITON_DEFINE_ACCEPT(icmp_inst) -}; - -class fcmp_inst: public cmp_inst { - fcmp_inst(type *ty, cmp_pred_t pred, - value *lhs, value *rhs, const std::string &name, instruction *next); - -public: - static fcmp_inst* create(cmp_pred_t pred, value *lhs, value *rhs, - const std::string &name = "", instruction *next = nullptr); - _TRITON_DEFINE_CLONE(fcmp_inst) - _TRITON_DEFINE_ACCEPT(fcmp_inst) -}; - -//===----------------------------------------------------------------------===// -// unary_inst classes -//===----------------------------------------------------------------------===// - -class unary_inst: public instruction { -protected: - unary_inst(type *ty, value_id_t id, value *v, const std::string &name, instruction *next); -}; - -//===----------------------------------------------------------------------===// -// dequantize_inst classes -//===----------------------------------------------------------------------===// - -class dequantize_inst: public instruction{ -private: - std::string repr_impl() const override { return "dequantize"; } - -protected: - dequantize_inst(type *ty, value *v, value *scale, value *shift, const std::string &name, instruction *next); - -public: - static dequantize_inst *create(value *arg, value *scale, value *shift, type *ty, - const std::string &name = "", instruction *next = nullptr); - - _TRITON_DEFINE_CLONE(dequantize_inst) - _TRITON_DEFINE_ACCEPT(dequantize_inst) -}; - -//===----------------------------------------------------------------------===// -// cast_inst classes -//===----------------------------------------------------------------------===// - -class cast_inst: public unary_inst{ -private: - std::string repr_impl() const; - -protected: - cast_inst(type *ty, value_id_t id, value *v, const std::string &name, instruction *next, cast_op_t op) - : unary_inst(ty, id, v, name, next), op_(op) { } - -private: - static bool is_valid(cast_op_t op, value *arg, type *ty); - -public: - // accessors - cast_op_t get_op() const { return op_; } - - // factory methods - static cast_inst *create(cast_op_t op, value *arg, type *ty, - const std::string &name = "", instruction *next = nullptr); - static cast_inst *create_integer_cast(value *arg, type *ty, bool is_signed, - const std::string &name = "", instruction *next = nullptr); - - _TRITON_DEFINE_ACCEPT(cast_inst) - -private: - cast_op_t op_; -}; - -#define TRITON_IR_DECLARE_CAST_INST_SIMPL(name, id, op) \ -class name : public cast_inst { \ - _TRITON_DEFINE_CLONE(name) \ - friend class cast_inst; \ - name(type *ty, value *v, 
const std::string &name, instruction *next) \ - : cast_inst(ty, id, v, name, next, op){ } \ -}; - -TRITON_IR_DECLARE_CAST_INST_SIMPL(trunc_inst, INST_CAST_TRUNC, cast_op_t::Trunc) -TRITON_IR_DECLARE_CAST_INST_SIMPL(z_ext_inst, INST_CAST_ZEXT, cast_op_t::ZExt) -TRITON_IR_DECLARE_CAST_INST_SIMPL(s_ext_inst, INST_CAST_SEXT, cast_op_t::SExt) -TRITON_IR_DECLARE_CAST_INST_SIMPL(fp_trunc_inst, INST_CAST_FP_TRUNC, cast_op_t::FPTrunc) -TRITON_IR_DECLARE_CAST_INST_SIMPL(fp_ext_inst, INST_CAST_FP_EXT, cast_op_t::FPExt) -TRITON_IR_DECLARE_CAST_INST_SIMPL(ui_to_fp_inst, INST_CAST_UI_TO_FP, cast_op_t::UIToFP) -TRITON_IR_DECLARE_CAST_INST_SIMPL(si_to_fp_inst, INST_CAST_SI_TO_FP, cast_op_t::SIToFP) -TRITON_IR_DECLARE_CAST_INST_SIMPL(fp_to_ui_inst, INST_CAST_FP_TO_UI, cast_op_t::FPToUI) -TRITON_IR_DECLARE_CAST_INST_SIMPL(fp_to_si_inst, INST_CAST_FP_TO_SI, cast_op_t::FPToSI) -TRITON_IR_DECLARE_CAST_INST_SIMPL(ptr_to_int_inst, INST_CAST_PTR_TO_INT, cast_op_t::PtrToInt) -TRITON_IR_DECLARE_CAST_INST_SIMPL(int_to_ptr_inst, INST_CAST_INT_TO_PTR, cast_op_t::IntToPtr) -TRITON_IR_DECLARE_CAST_INST_SIMPL(bit_cast_inst, INST_CAST_BIT_CAST, cast_op_t::BitCast) -TRITON_IR_DECLARE_CAST_INST_SIMPL(addr_space_cast_inst, INST_CAST_ADDR_SPACE_CAST, cast_op_t::AddrSpaceCast) - -//===----------------------------------------------------------------------===// -// terminator_inst classes -//===----------------------------------------------------------------------===// - -class terminator_inst: public instruction{ - using instruction::instruction; -}; - -// return instruction -class return_inst: public terminator_inst { -private: - std::string repr_impl() const { return "ret"; } - return_inst(context &ctx, value *ret_val, instruction *next); - -public: - // accessors - value *get_return_value() - { return get_num_operands() ? 
get_operand(0) : nullptr; } - - unsigned get_num_successors() const { return 0; } - - // factory methods - static return_inst* create(context &ctx, value *ret_val = nullptr, instruction *next = nullptr); - - _TRITON_DEFINE_CLONE(return_inst) - _TRITON_DEFINE_ACCEPT(return_inst) -}; - -// base branch instruction -class branch_inst: public terminator_inst{ -private: - std::string repr_impl() const { return "br"; } - -protected: - using terminator_inst::terminator_inst; - -public: - static branch_inst* create(basic_block *dest, - instruction *next = nullptr); - static branch_inst* create(value *cond, basic_block *if_dest, basic_block *else_dest, - instruction *next = nullptr); -}; - -// conditional branch -class cond_branch_inst: public branch_inst { -private: - friend class branch_inst; - cond_branch_inst(basic_block *if_dst, basic_block *else_dst, value *cond, instruction *next); - -public: - basic_block *get_true_dest() { return (basic_block*)get_operand(0); } - basic_block *get_false_dest() { return (basic_block*)get_operand(1); } - value *get_cond() { return get_operand(2); } - _TRITON_DEFINE_CLONE(cond_branch_inst) - _TRITON_DEFINE_ACCEPT(cond_branch_inst) -}; - -// unconditional branch -class uncond_branch_inst: public branch_inst { -private: - friend class branch_inst; - uncond_branch_inst(basic_block *dst, instruction *next); - -public: - basic_block *get_dest() { return (basic_block*)get_operand(0); } - _TRITON_DEFINE_CLONE(uncond_branch_inst) - _TRITON_DEFINE_ACCEPT(uncond_branch_inst) -}; - - -//===----------------------------------------------------------------------===// -// getelementptr_inst classes -//===----------------------------------------------------------------------===// - -class getelementptr_inst: public instruction { -private: - std::string repr_impl() const { return "getelementptr"; } - getelementptr_inst(type *pointee_ty, value *ptr, const std::vector &idx, const std::string &name, instruction *next); - -private: - static type *get_return_type(type *ty, value *ptr, const std::vector &idx); - static type *get_indexed_type_impl(type *ty, const std::vector &idx); - static type *get_indexed_type(type *ty, const std::vector &idx); - -public: - // accessors - type *get_source_elt_ty() { return source_elt_ty; } - op_iterator idx_begin() { return op_begin() + 1; } - op_iterator idx_end() { return op_end(); } - value *get_pointer_operand() { return *op_begin(); } - - // factory methods - static getelementptr_inst* create(value *ptr, const std::vector &idx, - const std::string &name = "", instruction *next = nullptr); - _TRITON_DEFINE_CLONE(getelementptr_inst) - _TRITON_DEFINE_ACCEPT(getelementptr_inst) - -private: - type *source_elt_ty; - type *res_elt_ty; -}; - -//===----------------------------------------------------------------------===// -// load_inst/store_inst classes -//===----------------------------------------------------------------------===// - -class io_inst: public instruction { -public: - - enum EVICTION_POLICY : uint32_t { - NORMAL=0, - EVICT_FIRST, - EVICT_LAST, - }; - -protected: - io_inst(type *ty, value_id_t id, unsigned num_ops, EVICTION_POLICY eviction, - const std::string &name = "", instruction *next = nullptr); - - std::string get_eviction_policy_repr() const { - if (eviction_ == EVICT_FIRST) return ".L1::evict_first"; - if (eviction_ == EVICT_LAST) return ".L2::evict_last"; - return ""; - } - -public: - // accessors - value *get_pointer_operand() { return get_operand(0); } - EVICTION_POLICY get_eviction_policy() const { return eviction_; } - 
-protected: - EVICTION_POLICY eviction_; -}; - -// load -class load_inst: public io_inst { -public: - enum CACHE_MODIFIER : uint32_t { - NONE=0, - CA, - CG, - }; - - - CACHE_MODIFIER get_cache_modifier() const { return cache_; } - bool get_is_volatile() const { return is_volatile_; } - -protected: - load_inst(value *ptr, value_id_t id, unsigned num_ops, CACHE_MODIFIER cache, EVICTION_POLICY eviction, - bool is_volatile, - const std::string &name = "", instruction *next = nullptr); - std::string get_cache_modifier_repr() const { - if (cache_ == CA) return ".ca"; - if (cache_ == CG) return ".cg"; - return ""; - } - CACHE_MODIFIER cache_; - - std::string get_volatile_repr() { - return is_volatile_ ? ".volatile" : ""; - } - bool is_volatile_; - -private: - static type *get_pointee_type(type *ty); -}; - -// unmasked load -class unmasked_load_inst: public load_inst { -private: - std::string repr_impl() const { return "unmasked_load" + get_cache_modifier_repr(); } - unmasked_load_inst(value *ptr, load_inst::CACHE_MODIFIER cache, load_inst::EVICTION_POLICY eviction, bool is_volatile, const std::string &name, instruction *next); - -public: - static unmasked_load_inst* create(value *ptr, - CACHE_MODIFIER cache, EVICTION_POLICY eviction, - bool is_volatile, - const std::string &name = "", - instruction *next = nullptr); - _TRITON_DEFINE_CLONE(unmasked_load_inst) - _TRITON_DEFINE_ACCEPT(unmasked_load_inst) -}; - -// masked load -class masked_load_inst: public load_inst { -private: - std::string repr_impl() const { return "masked_load" + get_cache_modifier_repr(); } - masked_load_inst(value *ptr, value *mask, value *false_value, load_inst::CACHE_MODIFIER cache, load_inst::EVICTION_POLICY eviction, bool is_volatile, - const std::string &name, instruction *next); - -public: - // accessors - value *get_mask_operand() { return get_operand(1); } - value *get_false_value_operand() { return get_operand(2); } - // factory method - static masked_load_inst* create(value *ptr, value *mask, value *false_value, - CACHE_MODIFIER cache, EVICTION_POLICY eviction, - bool is_volatile, - const std::string &name = "", - instruction *next = nullptr); - _TRITON_DEFINE_CLONE(masked_load_inst) - _TRITON_DEFINE_ACCEPT(masked_load_inst) -}; - -// masked load async -class masked_load_async_inst: public load_inst { -private: - std::string repr_impl() const { return "masked_load_async" + get_cache_modifier_repr(); } - masked_load_async_inst(value *ptr, value *mask, value *false_value, - CACHE_MODIFIER cache, EVICTION_POLICY eviction, - const std::string &name, instruction *next); - -public: - // accessors - value *get_mask_operand() { return get_operand(1); } - value *get_false_value_operand() { return get_operand(2); } - // factory method - static masked_load_async_inst* create(value *ptr, value *mask, value *false_value, - load_inst::CACHE_MODIFIER cache, - EVICTION_POLICY eviction, - const std::string &name = "", - instruction *next = nullptr); - _TRITON_DEFINE_CLONE(masked_load_async_inst) - _TRITON_DEFINE_ACCEPT(masked_load_async_inst) -}; - - - -// store -class store_inst: public io_inst { -protected: - store_inst(value *ptr, value_id_t id, unsigned num_ops, EVICTION_POLICY eviction, - const std::string &name = "", instruction *next = nullptr); - -public: - value *get_value_operand() { return get_operand(1); } -}; - -// unmasked_store -class unmasked_store_inst: public store_inst{ -private: - std::string repr_impl() const { return "unmasked_store"; } - unmasked_store_inst(value *ptr, value *v, EVICTION_POLICY eviction, const 
std::string &name, instruction *next); - -public: - // factory method - static unmasked_store_inst* create(value* ptr, value *v, EVICTION_POLICY eviction, - const std::string &name = "", - instruction *next = nullptr); - _TRITON_DEFINE_CLONE(unmasked_store_inst) - _TRITON_DEFINE_ACCEPT(unmasked_store_inst) -}; - -class masked_store_inst: public store_inst{ -private: - std::string repr_impl() const { return "masked_store"; } - masked_store_inst(value *ptr, value *v, value *mask, EVICTION_POLICY eviction, - const std::string &name, instruction *next); - -public: - // accessors - value *get_mask_operand() { return get_operand(2); } - // factory method - static masked_store_inst* create(value *ptr, value *v, value *mask, EVICTION_POLICY eviction, - const std::string &name = "", - instruction *next = nullptr); - _TRITON_DEFINE_CLONE(masked_store_inst) - _TRITON_DEFINE_ACCEPT(masked_store_inst) -}; - -//===----------------------------------------------------------------------===// -// struct classes -//===----------------------------------------------------------------------===// - -// insert_value - -class insert_value_inst: public instruction { -private: - std::string repr_impl() const { return "insertvalue"; } - insert_value_inst(value *val, value *elt, size_t idx, const std::string &name, instruction *next); - -public: - static insert_value_inst* create(value *val, value* elt, size_t idx, const std::string &name = "", instruction *next = nullptr); - size_t get_idx() { return idx_; } - _TRITON_DEFINE_CLONE(insert_value_inst) - _TRITON_DEFINE_ACCEPT(insert_value_inst) - -private: - size_t idx_; -}; - -// extract_value - -class extract_value_inst: public instruction { -private: - std::string repr_impl() const { return "extractvalue"; } - extract_value_inst(value *val, size_t idx, const std::string &name, instruction *next); - -public: - static extract_value_inst* create(value *val, size_t idx, const std::string &name = "", instruction *next = nullptr); - size_t get_idx() { return idx_; } - _TRITON_DEFINE_CLONE(extract_value_inst) - _TRITON_DEFINE_ACCEPT(extract_value_inst) - -private: - size_t idx_; -}; - -//===----------------------------------------------------------------------===// -// retile_inst classes -//===----------------------------------------------------------------------===// - -// cat - -class cat_inst: public instruction { -private: - std::string repr_impl() const { return "cat"; } - cat_inst(value *x, value *y, const std::string &name, instruction *next); - -public: - static instruction* create(value *lhs, value *rhs, - const std::string &name = "", - instruction *next = nullptr); - _TRITON_DEFINE_CLONE(cat_inst) - _TRITON_DEFINE_ACCEPT(cat_inst) -}; - -// retile - -class retile_inst: public unary_inst { -protected: - retile_inst(value *arg, value_id_t id, const type::block_shapes_t &shapes, const std::string &name, instruction *next); -}; - -// reshape - -class reshape_inst: public retile_inst { -private: - using retile_inst::retile_inst; - std::string repr_impl() const { return "reshape"; } - -public: - static instruction* create(value *arg, const type::block_shapes_t &shape_suffix, - const std::string &name = "", instruction *next = nullptr); - _TRITON_DEFINE_CLONE(reshape_inst) - _TRITON_DEFINE_ACCEPT(reshape_inst) -}; - -// splat - -class splat_inst: public retile_inst { -private: - using retile_inst::retile_inst; - std::string repr_impl() const { return "splat"; } - -public: - static instruction* create(value *arg, const type::block_shapes_t &shape_suffix, - const 
std::string &name = "", instruction *next = nullptr); - _TRITON_DEFINE_CLONE(splat_inst) - _TRITON_DEFINE_ACCEPT(splat_inst) -}; - -// broadcast - -class broadcast_inst: public retile_inst { -private: - using retile_inst::retile_inst; - std::string repr_impl() const { return "broadcast"; } - -public: - static instruction* create(value *arg, const type::block_shapes_t &shape_suffix, - const std::string &name = "", instruction *next = nullptr); - _TRITON_DEFINE_CLONE(broadcast_inst) - _TRITON_DEFINE_ACCEPT(broadcast_inst) -}; - - -// downcast - -class downcast_inst: public unary_inst { -private: - using unary_inst::unary_inst; - std::string repr_impl() const { return "downcast"; } - -public: - static instruction* create(value *arg, const std::string &name = "", instruction *next = nullptr); - _TRITON_DEFINE_CLONE(downcast_inst) - _TRITON_DEFINE_ACCEPT(downcast_inst) -}; - -//===----------------------------------------------------------------------===// -// builtin_inst classes -//===----------------------------------------------------------------------===// - -class builtin_inst: public instruction{ -protected: - using instruction::instruction; -}; - -class get_program_id_inst: public builtin_inst { -private: - get_program_id_inst(type *ty, unsigned axis, const std::string &name, instruction *next); - std::string repr_impl() const { return "get_program_id(" + std::to_string(axis_) + ")"; } - -public: - static instruction* create(context &ctx, unsigned axis, const std::string &name = "", instruction *next = nullptr); - unsigned get_axis() const { return axis_; } - _TRITON_DEFINE_CLONE(get_program_id_inst) - _TRITON_DEFINE_ACCEPT(get_program_id_inst) - -private: - unsigned axis_; -}; - -class get_num_programs_inst: public builtin_inst { -private: - get_num_programs_inst(type *ty, unsigned axis, const std::string &name, instruction *next); - std::string repr_impl() const { return "get_num_programs(" + std::to_string(axis_) + ")"; } - -public: - static instruction* create(context &ctx, unsigned axis, const std::string &name = "", instruction *next = nullptr); - unsigned get_axis() const { return axis_; } - _TRITON_DEFINE_CLONE(get_num_programs_inst) - _TRITON_DEFINE_ACCEPT(get_num_programs_inst) - -private: - unsigned axis_; -}; - - -class atomic_inst: public io_inst { -public: - using io_inst::io_inst; - atomic_inst(type *ty, value_id_t id, unsigned num_ops, const std::string &name, instruction *next): - io_inst(ty, id, num_ops, NORMAL, name, next) {} -}; - -class atomic_rmw_inst: public atomic_inst { -private: - atomic_rmw_inst(atomic_rmw_op_t op, value *ptr, value *val, value *msk, const std::string &name = "", instruction *next = nullptr); - std::string repr_impl() const { return "atomic_rmw"; } - _TRITON_DEFINE_CLONE(atomic_rmw_inst) - _TRITON_DEFINE_ACCEPT(atomic_rmw_inst) - -public: - static instruction* create(atomic_rmw_op_t op, value *ptr, value *val, value *msk, const std::string &name = "", instruction *next = nullptr); - atomic_rmw_op_t get_op() { return op_; } - -private: - atomic_rmw_op_t op_; -}; - -class atomic_cas_inst: public atomic_inst { -private: - atomic_cas_inst(value *ptr, value *cmp, value *val, const std::string &name, instruction *next); - std::string repr_impl() const { return "atomic_cas"; } - _TRITON_DEFINE_CLONE(atomic_cas_inst) - _TRITON_DEFINE_ACCEPT(atomic_cas_inst) - -public: - static instruction* create(value *ptr, value *cmp, value *val, const std::string &name = "", instruction *next = nullptr); -}; - -class umulhi_inst: public builtin_inst { -private: - 
umulhi_inst(value *lhs, value *rhs, const std::string &name = "", instruction *next = nullptr); - std::string repr_impl() const { return "umulhi"; } - _TRITON_DEFINE_CLONE(umulhi_inst) - _TRITON_DEFINE_ACCEPT(umulhi_inst) - -public: - static instruction* create(value *lhs, value *rhs, const std::string &name = "", instruction *next = nullptr); -}; - -class exp_inst: public builtin_inst { -private: - exp_inst(value *val, const std::string &name = "", instruction *next = nullptr); - std::string repr_impl() const { return "exp"; } - _TRITON_DEFINE_CLONE(exp_inst) - _TRITON_DEFINE_ACCEPT(exp_inst) - -public: - static instruction* create(value *val, const std::string &name = "", instruction *next = nullptr); -}; - -class cos_inst: public builtin_inst { -private: - cos_inst(value *val, const std::string &name = "", instruction *next = nullptr); - std::string repr_impl() const { return "cos"; } - _TRITON_DEFINE_CLONE(cos_inst) - _TRITON_DEFINE_ACCEPT(cos_inst) - -public: - static instruction* create(value *val, const std::string &name = "", instruction *next = nullptr); -}; - -class sin_inst: public builtin_inst { -private: - sin_inst(value *val, const std::string &name = "", instruction *next = nullptr); - std::string repr_impl() const { return "sin"; } - _TRITON_DEFINE_CLONE(sin_inst) - _TRITON_DEFINE_ACCEPT(sin_inst) - -public: - static instruction* create(value *val, const std::string &name = "", instruction *next = nullptr); -}; - -class log_inst: public builtin_inst { -private: - log_inst(value *val, const std::string &name = "", instruction *next = nullptr); - std::string repr_impl() const { return "log"; } - _TRITON_DEFINE_CLONE(log_inst) - _TRITON_DEFINE_ACCEPT(log_inst) - -public: - static instruction* create(value *val, const std::string &name = "", instruction *next = nullptr); -}; - - -class dot_inst: public builtin_inst { -public: - enum TransT { NoTrans, Trans }; - enum DataType { - FP8, FP16, BF16, TF32, FP32, - INT1, INT4, INT8, INT32, - UNKNOWN, - }; - -private: - dot_inst(value *A, value *B, value *C, TransT AT, TransT BT, bool allow_tf32, const std::string &name, instruction *next); - std::string repr_impl() const { return "dot"; } - -public: - bool is_prefetched() const { return is_prefetched_; } - void set_prefetched(bool is_prefetched) { is_prefetched_ = is_prefetched; } - bool allow_tf32() const { return allow_tf32_; } - bool is_trans_a() const { return AT_ == Trans; } - bool is_trans_b() const { return BT_ == Trans; } - -public: - static instruction *create(value *A, value *B, value *C, bool AT, bool BT, bool allow_tf32, const std::string &name = "", instruction *next = nullptr); - static instruction* create_nn(value *A, value *B, value *C, bool allow_tf32, const std::string &name = "", instruction *next = nullptr); - static instruction* create_nt(value *A, value *B, value *C, bool allow_tf32, const std::string &name = "", instruction *next = nullptr); - static instruction* create_tn(value *A, value *B, value *C, bool allow_tf32, const std::string &name = "", instruction *next = nullptr); - static instruction* create_tt(value *A, value *B, value *C, bool allow_tf32, const std::string &name = "", instruction *next = nullptr); - _TRITON_DEFINE_CLONE(dot_inst) - _TRITON_DEFINE_ACCEPT(dot_inst) - -private: - bool is_prefetched_ = false; - bool allow_tf32_ = false; - DataType C_type_ = DataType::FP32; - DataType A_type_ = DataType::FP16; - DataType B_type_ = DataType::FP16; - TransT AT_; - TransT BT_; -}; - -//class outer_inst: public builtin_inst { -//private: -// 
outer_inst(value *A, value *B, value *C, const std::string &name, instruction *next); -//public: -// static instruction* create(value *A, value *B, value *C, const std::string &name = "", instruction *next = nullptr); -//}; - -class trans_inst: public builtin_inst { -public: - ir::type* get_res_ty(ir::type* in, std::vector perm); - std::vector init_perm(ir::type* ty, const std::vector& perm); - -private: - trans_inst(value *arg, const std::vector& perm, const std::string& name, instruction* next); - std::string repr_impl() const { return "trans"; } - -public: - static instruction* create(value *arg, const std::vector &perm = {}, const std::string &name = "", instruction *next = nullptr); - const std::vector get_perm() const; - _TRITON_DEFINE_CLONE(trans_inst) - _TRITON_DEFINE_ACCEPT(trans_inst) - -private: - std::vector perm_; -}; - -class sqrt_inst: public builtin_inst { -private: - sqrt_inst(value *arg, const std::string& name, instruction* next); - std::string repr_impl() const { return "sqrt"; } -public: - static instruction* create(value *arg, const std::string &name = "", instruction *next = nullptr); - _TRITON_DEFINE_CLONE(sqrt_inst) - _TRITON_DEFINE_ACCEPT(sqrt_inst) -}; - -class reduce_inst: public builtin_inst { -public: - enum op_t{ - ADD, SUB, MAX, MIN, UMAX, UMIN, - ARGMAX, ARGMIN, ARGUMAX, ARGUMIN, - FADD, FSUB, FMAX, FMIN, - ARGFMAX, ARGFMIN, - XOR - }; - -private: - static type* get_res_type(value *arg, unsigned axis); - static std::string to_str(op_t op); - -private: - reduce_inst(value* arg, op_t op, unsigned axis, const std::string& name, instruction* next); - std::string repr_impl() const { return "reduce"; } - _TRITON_DEFINE_CLONE(reduce_inst) - _TRITON_DEFINE_ACCEPT(reduce_inst) - -public: - static instruction* create(value *arg, op_t op, unsigned axis, const std::string &name = "", instruction *next = nullptr); - unsigned get_axis() const { return axis_; } - op_t get_op() const { return op_; } - bool with_index() const { - return with_index_ops_.find(op_) != with_index_ops_.end(); - } - -private: - const static inline std::set with_index_ops_ = { - op_t::ARGMAX, op_t::ARGMIN, op_t::ARGUMAX, - op_t::ARGUMIN, op_t::ARGFMAX, op_t::ARGFMIN}; - unsigned axis_; - op_t op_; -}; - - -class select_inst: public builtin_inst { -private: - select_inst(value *pred, value *if_value, value *else_value, const std::string& name, instruction* next); - std::string repr_impl() const { return "select"; } - _TRITON_DEFINE_CLONE(select_inst) - _TRITON_DEFINE_ACCEPT(select_inst) - -public: - static instruction* create(value *pred, value *if_value, value *else_value, const std::string &name = "", instruction *next = nullptr); - value* get_pred_op() { return get_operand(0); } - value* get_if_value_op() { return get_operand(1); } - value* get_else_value_op() { return get_operand(2); } -}; - -//===----------------------------------------------------------------------===// -// intrinsics classes -//===----------------------------------------------------------------------===// - - -class copy_to_shared_inst: public unary_inst{ -private: - using unary_inst::unary_inst; - std::string repr_impl() const { return "copy_to_shared"; } - -public: - static copy_to_shared_inst* create(value *arg, const std::string &name = "", - instruction *next = nullptr); - _TRITON_DEFINE_CLONE(copy_to_shared_inst) - _TRITON_DEFINE_ACCEPT(copy_to_shared_inst) -}; - -class copy_from_shared_inst: public unary_inst{ -private: - using unary_inst::unary_inst; - std::string repr_impl() const { return "copy_from_shared"; } - 
-public: - static copy_from_shared_inst* create(value *arg, const std::string &name = "", - instruction *next = nullptr); - _TRITON_DEFINE_CLONE(copy_from_shared_inst) - _TRITON_DEFINE_ACCEPT(copy_from_shared_inst) -}; - -class cvt_layout_inst: public unary_inst { -private: - using unary_inst::unary_inst; - std::string repr_impl() const { return "cvt_layout_inst"; } - -public: - static cvt_layout_inst* create(value *arg, const std::string &name = "", instruction *next = nullptr); - _TRITON_DEFINE_CLONE(cvt_layout_inst) - _TRITON_DEFINE_ACCEPT(cvt_layout_inst) -}; - -class barrier_inst: public instruction{ -private: - barrier_inst(context &ctx, const std::string &name, instruction *next); - std::string repr_impl() const { return "barrier"; } - _TRITON_DEFINE_CLONE(barrier_inst) - _TRITON_DEFINE_ACCEPT(barrier_inst) - -public: - static barrier_inst* create(context &ctx, const std::string &name = "", - instruction *next = nullptr); -}; - -class async_wait_inst: public instruction{ -private: - async_wait_inst(context &ctx, int N, const std::string &name, instruction *next); - std::string repr_impl() const { return "async_wait_group " + std::to_string(N_) ; } - _TRITON_DEFINE_CLONE(async_wait_inst) - _TRITON_DEFINE_ACCEPT(async_wait_inst) - -public: - static async_wait_inst* create(context &ctx, int N, - const std::string &name = "", instruction *next = nullptr); - int get_N() { return N_; } - void set_N(int n) { N_ = n; } - -private: - int N_; -}; - -class prefetch_s_inst : public instruction { - std::string repr_impl() const { return "prefetch_s"; } - _TRITON_DEFINE_CLONE(prefetch_s_inst) - _TRITON_DEFINE_ACCEPT(prefetch_s_inst) - - /// inc_: 0->first, 1->latch - int inc_ = 0; -public: - prefetch_s_inst(context &ctx, value *arg, int inc, const std::string &name, instruction *next) - : instruction(type::get_void_ty(ctx), INST_PREFETCH_S, 1, name, next), inc_(inc) { - set_operand(0, arg); - } - int get_inc() const { return inc_; } - static prefetch_s_inst *create(context &ctx, value *arg, int inc, const std::string &name = "", - instruction *next=nullptr); -}; - -/* constant range */ -class make_range: public instruction{ - make_range(type *ty, constant_int* first, constant_int* last); - std::string repr_impl() const { return "make_range[" + first_->repr() + " : " + last_->repr() + "]"; } - _TRITON_DEFINE_CLONE(make_range) - _TRITON_DEFINE_ACCEPT(make_range) - -public: - static make_range *create(constant_int *first, constant_int *last); - const constant_int* get_first() const; - const constant_int* get_last() const; - -private: - constant_int* first_; - constant_int* last_; -}; - -/* timing utilities */ -class clock_inst: public instruction{ - clock_inst(context &ctx, const std::string &name, instruction *next); - std::string repr_impl() const { return "clock"; } - _TRITON_DEFINE_CLONE(clock_inst) - _TRITON_DEFINE_ACCEPT(clock_inst) - -public: - static clock_inst* create(context &ctx, const std::string &name = "", instruction *next = nullptr); -}; - -class globaltimer_inst: public instruction{ - globaltimer_inst(context &ctx, const std::string &name, instruction *next); - std::string repr_impl() const { return "globaltimer"; } - _TRITON_DEFINE_CLONE(globaltimer_inst) - _TRITON_DEFINE_ACCEPT(globaltimer_inst) - -public: - static globaltimer_inst* create(context &ctx, const std::string &name = "", instruction *next = nullptr); -}; - -class extern_elementwise_inst : public instruction { - extern_elementwise_inst(context &ctx, const std::vector &args, - type *dst_ty, const std::string &lib_name, - 
const std::string &extern_lib_path, - const std::string &symbol_name, - const std::string &name, instruction *next); - std::string repr_impl() const { return "extern_elementwise"; } - _TRITON_DEFINE_CLONE(extern_elementwise_inst) - _TRITON_DEFINE_ACCEPT(extern_elementwise_inst) - - public: - static extern_elementwise_inst *create( - context &ctx, const std::vector &args, type *dst_ty, - const std::string &lib_name = "", const std::string &lib_path = "", - const std::string &symbol_name = "", const std::string &name = "", - instruction *next = nullptr); - - const std::string &get_lib_name() const { return lib_name_; } - const std::string &get_lib_path() const { return lib_path_; } - const std::string &get_symbol_name() const { return symbol_name_; } - - private: - std::string lib_name_; - std::string lib_path_; - std::string symbol_name_; -}; -} -} - -#endif diff --git a/include/triton/ir/metadata.h b/include/triton/ir/metadata.h deleted file mode 100644 index 69512c6b03e6..000000000000 --- a/include/triton/ir/metadata.h +++ /dev/null @@ -1,34 +0,0 @@ -#pragma once - -#ifndef _TRITON_IR_METADATA_H_ -#define _TRITON_IR_METADATA_H_ - -#include - -namespace triton{ -namespace ir{ - - -/* Metadata */ -class metadata{ -public: - enum kind_t{ - multiple_of, - max_contiguous - }; - -private: - metadata(kind_t kind, std::vector value); - -public: - static metadata* get(kind_t kind, std::vector value); - -private: - kind_t kind_; - std::vector value_; -}; - -} -} - -#endif diff --git a/include/triton/ir/module.h b/include/triton/ir/module.h deleted file mode 100644 index d09a51a22284..000000000000 --- a/include/triton/ir/module.h +++ /dev/null @@ -1,129 +0,0 @@ -#pragma once - -#ifndef _TRITON_IR_MODULE_H_ -#define _TRITON_IR_MODULE_H_ - -#include -#include -#include -#include -#include -#include "triton/ir/builder.h" -#include "triton/ir/metadata.h" -#include "triton/ir/context.h" - -namespace triton{ - -namespace lang{ - -class iteration_statement; -class compound_statement; - -} - -namespace ir{ - -class basic_block; -class phi_node; -class value; -class context; -class function; -class attribute; -class function_type; -class constant; -class global_value; -class alloc_const; - -class value_constructor { - typedef std::pair val_key_t; - -private: - phi_node *make_phi(type *ty, unsigned num_values, basic_block *block); - value *try_remove_trivial_phis(ir::phi_node *&phi); - value *add_phi_operands(const std::string& name, phi_node *&phi); - value *get_value_recursive(const std::string& name, basic_block *block); - -public: - value_constructor(builder &builder); - - void set_value(const std::string& name, basic_block* block, value *x); - void set_value(const std::string& name, value* x); - const std::map& get_values() { return values_; } - void set_values(const std::map& values) { values_ = values; } - value *get_value(const std::string& name, basic_block* block); - value *get_value(const std::string& name); - void set_type(const std::string& name, ir::type* ty) { types_[name] = ty; } - // Seal block -- no more predecessors will be added - void seal_block(basic_block *block); - // Metadata - -private: - ir::builder& builder_; - std::map values_; - std::map types_; - std::set sealed_blocks_; - std::map> incomplete_phis_; - std::map current_phi_; -}; - -/* Module */ - -class module { - typedef std::pair val_key_t; - typedef std::pair> md_pair_t; - friend class function; - -public: - typedef std::map symbols_map_t; - typedef std::vector functions_list_t; - -private: - void push_function(function *fn) 
{ functions_.push_back(fn); } - -public: - module(const std::string &name, builder &builder): name_(name), builder_(builder) {} - builder &get_builder() { return builder_; }; - const std::string& get_name() { return name_; }; - - // Functions - const functions_list_t &get_function_list() const { return functions_; } - function *get_function(const std::string& name) { - if(symbols_.find(name) == symbols_.end()) - throw std::runtime_error("function " + name + " is not declared"); - return (function*)symbols_.at(name); - } - function *get_or_insert_function(const std::string &name, function_type *ty); - bool has_function(const std::string& name){ - return symbols_.find(name) != symbols_.end(); - } - void remove_function(ir::function* fn){ - functions_.erase(std::remove(functions_.begin(), functions_.end(), fn), functions_.end()); - } - - void reset_ret_ty(const std::string& name, type* ty); - - // Const allocation - void add_alloc(ir::alloc_const* x) { allocs_.push_back(x); } - const std::vector& allocs() { return allocs_; } - // Register global - void register_global(const std::string& name, ir::value *x) { globals_[name] = x; } - const std::map& globals() const { return globals_; } - // Metadata - void print(std::ostream &os); - void add_metadata(const std::string &name, md_pair_t x) { metadatas_[name] = x; } - const std::map &get_metadatas() const { return metadatas_; } - -private: - std::string name_; - builder &builder_; - functions_list_t functions_; - symbols_map_t symbols_; - std::vector allocs_; - std::map globals_; - std::map metadatas_; -}; - -} -} - -#endif diff --git a/include/triton/ir/print.h b/include/triton/ir/print.h deleted file mode 100644 index 6dbf2fe02614..000000000000 --- a/include/triton/ir/print.h +++ /dev/null @@ -1,22 +0,0 @@ -#ifndef _TRITON_IR_PRINT_H_ -#define _TRITON_IR_PRINT_H_ - -#include "builder.h" - -namespace triton{ -namespace ir{ - -class module; -class function; -class basic_block; -class instruction; - -void print(module &mod, std::ostream& os); -void print(function &func, std::ostream& os); -void print(basic_block &bb, std::ostream& os); -void print(instruction &instr, std::ostream& os); - -} -} - -#endif diff --git a/include/triton/ir/type.h b/include/triton/ir/type.h deleted file mode 100644 index 2c9d252944d1..000000000000 --- a/include/triton/ir/type.h +++ /dev/null @@ -1,252 +0,0 @@ -#pragma once - -#ifndef _TRITON_IR_TYPE_H_ -#define _TRITON_IR_TYPE_H_ - -#include -#include -#include -#include - -namespace triton{ -namespace ir{ - -class context; -class value; -class integer_type; -class constant_int; - -/* Type */ -class type { -public: - typedef std::vector block_shapes_t; - - typedef std::vector contained_tys_vec_t; - typedef contained_tys_vec_t::iterator ty_iterator; - typedef contained_tys_vec_t::const_iterator const_ty_iterator; - -public: - enum id_t { - // primitive types - VoidTyID = 0, ///< type with no size - FP8TyID, ///< 8-bit floating point type (3 bits mantissa) - FP16TyID, ///< 16-bit floating point type (10 bits mantissa) - BF16TyID, ///< 16-bit floating point type (7 bits mantissa) - FP32TyID, ///< 32-bit floating point type - FP64TyID, ///< 64-bit floating point type - LabelTyID, ///< Labels - MetadataTyID, ///< Metadata - TokenTyID, ///< Token - // derived types - IntegerTyID, ///< Arbitrary bit width integers - FunctionTyID, ///< Functions - PointerTyID, ///< Pointers - StructTyID, ///< Struct - BlockTyID, ///< Block - }; - -public: - //constructors - type(context &ctx, id_t id) : ctx_(ctx), id_(id) { } - - //destructor - 
virtual ~type(){} - - // accessors - context &get_context() const { return ctx_; } - id_t get_type_id() const { return id_; } - // type attributes - unsigned get_fp_mantissa_width() const; - unsigned get_integer_bitwidth() const; - unsigned get_tile_bitwidth() const; - unsigned get_primitive_size_in_bits() const; - type *get_scalar_ty() const; - block_shapes_t get_block_shapes() const; - const size_t get_tile_rank() const; - const size_t get_tile_ranks1() const; - unsigned get_tile_num_elements() const; - type *get_tile_element_ty() const; - unsigned get_pointer_address_space() const; - type *get_pointer_element_ty() const; - unsigned get_struct_numel() const { return contained_tys_.size(); } - type *get_struct_type(unsigned int i) const { return contained_tys_[i]; } - - // primitive predicates - bool is_void_ty() const { return id_ == VoidTyID; } - bool is_fp8_ty() const { return id_ == FP8TyID; } - bool is_fp16_ty() const { return id_ == FP16TyID; } - bool is_bf16_ty() const { return id_ == BF16TyID; } - bool is_fp32_ty() const { return id_ == FP32TyID; } - bool is_fp64_ty() const { return id_ == FP64TyID; } - bool is_label_ty() const { return id_ == LabelTyID;} - bool is_metadata_ty() const { return id_ == MetadataTyID; } - bool is_token_ty() const { return id_ == TokenTyID; } - bool is_integer_ty() const { return id_ == IntegerTyID; } - bool is_bool_ty() const { return is_integer_ty(1); } - bool is_pointer_ty() const { return id_ == PointerTyID; } - bool is_block_ty() const { return id_ == BlockTyID; } - bool is_struct_ty() const { return id_ == StructTyID; } - - // Composite predicates - bool is_int_or_tileint_ty(); - bool is_integer_ty(unsigned width) const; - bool is_floating_point_ty() const; - bool is_sized() const ; - - // Factory methods - // primitive types - static type *get_void_ty(context &ctx); - static type *get_label_ty(context &ctx); - // half - static type *get_fp8_ty(context &ctx); - static type *get_fp16_ty(context &ctx); - static type *get_bf16_ty(context &ctx); - static type *get_fp32_ty(context &ctx); - static type *get_fp64_ty(context &ctx); - // integer types - static integer_type *get_int1_ty(context &ctx); - static integer_type *get_int8_ty(context &ctx); - static integer_type *get_int16_ty(context &ctx); - static integer_type *get_int32_ty(context &ctx); - static integer_type *get_int64_ty(context &ctx); - static integer_type *get_int128_ty(context &ctx); - - // repr - std::string tile_repr() const { - std::string res = get_tile_element_ty()->repr(); - auto shapes = get_block_shapes(); - res += "<"; - for(size_t i = 0; i < shapes.size(); i++){ - if(i > 0) - res += ", "; - res += std::to_string(shapes[i]); - } - res+= ">"; - return res; - } - - std::string repr() const { - switch(id_) { - case VoidTyID: return "void"; - case FP8TyID: return "fp8"; - case BF16TyID: return "bf16"; - case FP16TyID: return "f16"; - case FP32TyID: return "f32"; - case FP64TyID: return "f64"; - case LabelTyID: return "label"; - case MetadataTyID: return "md"; - case TokenTyID: return "tok"; - case IntegerTyID: return ("i") + std::to_string(get_integer_bitwidth()); - case FunctionTyID: return "fn"; - case PointerTyID: return get_pointer_element_ty()->repr() + "*"; - case StructTyID: return "struct"; - case BlockTyID: return tile_repr(); - default: break; - } - throw std::logic_error("unknown type id '" + std::to_string(id_) + "'"); - }; - -private: - context &ctx_; - id_t id_; - -protected: - contained_tys_vec_t contained_tys_; -}; - -class integer_type: public type { - friend class 
context_impl;
-
-private:
-  // constructors
-  integer_type(context &ctx, unsigned bitwidth)
-    : type(ctx, IntegerTyID), bitwidth_(bitwidth) {}
-
-public:
-  // accessors
-  unsigned get_bitwidth() const { return bitwidth_; }
-
-  // factory methods
-  static integer_type* get(context &ctx, unsigned width);
-
-private:
-  unsigned bitwidth_;
-};
-
-class composite_type: public type{
-protected:
-  using type::type;
-
-public:
-  bool index_valid(value *idx) const;
-  type* get_type_at_index(value *idx) const;
-};
-
-class struct_type: public composite_type {
-public:
-  struct_type(const contained_tys_vec_t& tys, bool is_packed);
-  unsigned get_num_types() const { return contained_tys_.size(); }
-  static struct_type* get(const contained_tys_vec_t& tys, bool is_packed);
-
-private:
-  bool is_packed_;
-};
-
-class block_type: public composite_type {
-private:
-  block_type(type *ty, const block_shapes_t &shapes);
-  static bool is_valid_elt_ty(type *ty);
-
-public:
-  // accessors
-  const block_shapes_t& get_shapes() const { return shapes_; }
-  unsigned get_num_elements() const;
-  unsigned get_bitwidth() const;
-
-  // factory methods
-  static block_type* get(type *ty, const block_shapes_t &shapes);
-  static block_type* get_same_shapes(type *ty, type *ref);
-
-private:
-  block_shapes_t shapes_;
-};
-
-class pointer_type: public type {
-private:
-  pointer_type(type *ty, unsigned address_space);
-  static bool is_valid_elt_ty(type *ty);
-
-public:
-  // accessors
-  unsigned get_address_space() const { return address_space_; }
-  type *get_element_ty() const { return contained_tys_[0]; }
-  // factory methods
-  static pointer_type* get(type *ty, unsigned address_space);
-
-private:
-  unsigned address_space_;
};
-
-class function_type: public type {
-private:
-  function_type(type *ret_ty, const std::vector<type*> &param_tys);
-
-public:
-  // accessors
-  unsigned get_num_params() const { return contained_tys_.size() - 1; }
-  const_ty_iterator params_begin() const { return contained_tys_.begin() + 1; }
-  const_ty_iterator params_end() const { return contained_tys_.end(); }
-  ty_iterator params_begin() { return contained_tys_.begin() + 1; }
-  ty_iterator params_end() { return contained_tys_.end(); }
-  type* get_param_ty(unsigned i) const { return contained_tys_.at(1 + i); }
-  type* get_return_ty() const { return contained_tys_.at(0); }
-  void reset_ret_ty(type* ty) { contained_tys_[0] = ty;}
-  // factory methods
-  static function_type* get(type *ret_ty, const std::vector<type*>& param_tys);
-};
-
-
-}
-}
-
-#endif
diff --git a/include/triton/ir/utils.h b/include/triton/ir/utils.h
deleted file mode 100644
index 1fad79181f15..000000000000
--- a/include/triton/ir/utils.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#pragma once
-
-#ifndef _TRITON_IR_CFG_H_
-#define _TRITON_IR_CFG_H_
-
-#include <functional>
-#include <vector>
-
-namespace triton{
-namespace ir{
-
-class module;
-class function;
-class basic_block;
-class instruction;
-class value;
-
-class cfg {
-public:
-  static std::vector<basic_block*> post_order(function* fn);
-  static std::vector<basic_block*> reverse_post_order(function* fn);
-};
-
-void for_each_instruction(ir::module& mod, const std::function<void(instruction*)> &fn);
-void for_each_instruction_backward(module &mod, const std::function<void(instruction*)> &do_work);
-void for_each_value(ir::module& mod, const std::function<void(value*)> &fn);
-
-}
-}
-
-#endif
diff --git a/include/triton/ir/value.h b/include/triton/ir/value.h
deleted file mode 100644
index fde09121a047..000000000000
--- a/include/triton/ir/value.h
+++ /dev/null
@@ -1,95 +0,0 @@
-#pragma once
-
-#ifndef _TRITON_IR_VALUE_H_
-#define _TRITON_IR_VALUE_H_
-
-#include -#include -#include - -namespace triton{ -namespace ir{ - -class type; -class use; -class user; -class visitor; - -//===----------------------------------------------------------------------===// -// value class -//===----------------------------------------------------------------------===// - -class value { -public: - typedef std::vector users_t; - -public: - // constructor - value(type *ty, const std::string &name = ""); - virtual ~value(){ } - // uses - void add_use(user* arg); - users_t::iterator erase_use(user* arg); - const std::vector &get_users() { return users_; } - void replace_all_uses_with(value *target); - // name - void set_name(const std::string &name); - const std::string &get_name() const { return name_; } - bool has_name() const { return !name_.empty(); } - type* get_type() const { return ty_; } - // visitor - virtual void accept(visitor *v) = 0; - -private: - std::string name_; - -protected: - type *ty_; - users_t users_; -}; - -//===----------------------------------------------------------------------===// -// user class -//===----------------------------------------------------------------------===// - -class user: public value{ -public: - typedef std::vector ops_t; - typedef ops_t::iterator op_iterator; - typedef ops_t::const_iterator const_op_iterator; - -protected: - void resize_ops(unsigned num_ops) { ops_.resize(num_ops + num_hidden_); num_ops_ = num_ops; } - void resize_hidden(unsigned num_hidden) { ops_.resize(num_ops_ + num_hidden); num_hidden_ = num_hidden; } - -public: - // Constructor - user(type *ty, unsigned num_ops, const std::string &name = "") - : value(ty, name), ops_(num_ops), num_ops_(num_ops), num_hidden_(0){ - } - virtual ~user() { } - - // Operands - const ops_t& ops() { return ops_; } - const ops_t& ops() const { return ops_; } - op_iterator op_begin() { return ops_.begin(); } - op_iterator op_end() { return ops_.end(); } - void set_operand(unsigned i, value *x); - value *get_operand(unsigned i) const; - unsigned get_num_operands() const ; - unsigned get_num_hidden() const; - - // Utils - value::users_t::iterator replace_uses_of_with(value *before, value *after); - - -private: - ops_t ops_; - unsigned num_ops_; - unsigned num_hidden_; -}; - -} -} - -#endif diff --git a/include/triton/ir/visitor.h b/include/triton/ir/visitor.h deleted file mode 100644 index b03b5f4fe92c..000000000000 --- a/include/triton/ir/visitor.h +++ /dev/null @@ -1,191 +0,0 @@ -#pragma once - -#ifndef _TRITON_IR_VISITOR_H_ -#define _TRITON_IR_VISITOR_H_ - - -namespace triton{ -namespace ir{ - -class value; - -class instruction; - -class call_inst; -class launch_inst; - -class phi_node; -class binary_operator; -class getelementptr_inst; - -class icmp_inst; -class fcmp_inst; -class dequantize_inst; -class cast_inst; -class trunc_inst; -class z_ext_inst; -class s_ext_inst; -class fp_trunc_inst; -class fp_ext_inst; -class ui_to_fp_inst; -class si_to_fp_inst; -class fp_to_ui_inst; -class fp_to_si_inst; -class ptr_to_int_inst; -class int_to_ptr_inst; -class bit_cast_inst; -class addr_space_cast_inst; - -class return_inst; -class cond_branch_inst; -class uncond_branch_inst; - - -class unmasked_load_inst; -class masked_load_inst; -class unmasked_store_inst; -class masked_store_inst; - -class extract_value_inst; -class insert_value_inst; - -class retile_inst; -class reshape_inst; -class splat_inst; -class cat_inst; -class broadcast_inst; -class downcast_inst; - -class umulhi_inst; -class exp_inst; -class cos_inst; -class sin_inst; -class log_inst; - -class 
get_program_id_inst; -class get_num_programs_inst; -class atomic_inst; -class atomic_cas_inst; -class atomic_rmw_inst; -class dot_inst; -class trans_inst; -class sqrt_inst; -class reduce_inst; -class select_inst; - -class cvt_layout_inst; -class copy_to_shared_inst; -class copy_from_shared_inst; -class masked_load_async_inst; -class barrier_inst; -class async_wait_inst; -class make_range_dyn; -class make_range; -class prefetch_s_inst; -class clock_inst; -class globaltimer_inst; - -class extern_elementwise_inst; - -class make_range_sta; -class undef_value; -class constant_int; -class constant_fp; -class global_value; -class global_object; -class alloc_const; - -class constant_fp; -class undef_value; -class constant_int; -class constant_fp; -class global_value; -class global_object; -class alloc_const; - -class function; - -class basic_block; - -class argument; - -class visitor { -public: - virtual ~visitor() {} - - virtual void visit_value(ir::value*); - virtual void visit_call_inst(ir::call_inst*) = 0; - virtual void visit_launch_inst(ir::launch_inst*) = 0; - - virtual void visit_basic_block(basic_block*) = 0; - virtual void visit_argument(argument*) = 0; - virtual void visit_phi_node(phi_node*) = 0; - virtual void visit_binary_operator(binary_operator*) = 0; - virtual void visit_getelementptr_inst(getelementptr_inst*) = 0; - - virtual void visit_icmp_inst(icmp_inst*) = 0; - virtual void visit_fcmp_inst(fcmp_inst*) = 0; - virtual void visit_dequantize_inst(dequantize_inst*) = 0; - virtual void visit_cast_inst(cast_inst*) = 0; - - virtual void visit_return_inst(return_inst*) = 0; - virtual void visit_cond_branch_inst(cond_branch_inst*) = 0; - virtual void visit_uncond_branch_inst(uncond_branch_inst*) = 0; - - - virtual void visit_unmasked_load_inst(unmasked_load_inst*) = 0; - virtual void visit_masked_load_inst(masked_load_inst*) = 0; - virtual void visit_unmasked_store_inst(unmasked_store_inst*) = 0; - virtual void visit_masked_store_inst(masked_store_inst*) = 0; - - virtual void visit_umulhi_inst(umulhi_inst*) = 0; - virtual void visit_exp_inst(exp_inst*) = 0; - virtual void visit_cos_inst(cos_inst*) = 0; - virtual void visit_sin_inst(sin_inst*) = 0; - virtual void visit_log_inst(log_inst*) = 0; - - virtual void visit_extract_value_inst(extract_value_inst*) = 0; - virtual void visit_insert_value_inst(insert_value_inst*) = 0; - - virtual void visit_reshape_inst(reshape_inst*) = 0; - virtual void visit_splat_inst(splat_inst*) = 0; - virtual void visit_cat_inst(cat_inst*) = 0; - virtual void visit_broadcast_inst(broadcast_inst*) = 0; - virtual void visit_downcast_inst(downcast_inst*) = 0; - - virtual void visit_get_program_id_inst(get_program_id_inst*) = 0; - virtual void visit_get_num_programs_inst(get_num_programs_inst*) = 0; - virtual void visit_atomic_cas_inst(atomic_cas_inst*) = 0; - virtual void visit_atomic_rmw_inst(atomic_rmw_inst*) = 0; - virtual void visit_dot_inst(dot_inst*) = 0; - virtual void visit_trans_inst(trans_inst*) = 0; - virtual void visit_sqrt_inst(sqrt_inst*) = 0; - virtual void visit_reduce_inst(reduce_inst*) = 0; - virtual void visit_select_inst(select_inst*) = 0; - - virtual void visit_cvt_layout_inst(cvt_layout_inst*) = 0; - virtual void visit_copy_to_shared_inst(copy_to_shared_inst*) = 0; - virtual void visit_copy_from_shared_inst(copy_from_shared_inst*) = 0; - - - virtual void visit_masked_load_async_inst(masked_load_async_inst*)= 0; - virtual void visit_barrier_inst(barrier_inst*) = 0; - virtual void visit_async_wait_inst(async_wait_inst*) = 0; - virtual void 
visit_make_range(make_range*) = 0; - virtual void visit_prefetch_s_inst(prefetch_s_inst*) = 0; - virtual void visit_function(function*) = 0; - virtual void visit_clock_inst(clock_inst*) = 0; - virtual void visit_globaltimer_inst(globaltimer_inst*) = 0; - - virtual void visit_undef_value(undef_value*) = 0; - virtual void visit_constant_int(constant_int*) = 0; - virtual void visit_constant_fp(constant_fp*) = 0; - virtual void visit_alloc_const(alloc_const*) = 0; - - virtual void visit_extern_elementwise_inst(extern_elementwise_inst*) = 0; -}; - -} -} - -#endif diff --git a/include/triton/tools/bench.hpp b/include/triton/tools/bench.hpp deleted file mode 100644 index c0dbd5061792..000000000000 --- a/include/triton/tools/bench.hpp +++ /dev/null @@ -1,54 +0,0 @@ -#pragma once - -#ifndef _TRITON_TOOLS_BENCH_H_ -#define _TRITON_TOOLS_BENCH_H_ - -#include -#include -#include -#include "triton/driver/device.h" -#include "triton/driver/stream.h" - -namespace triton{ -namespace tools{ - -class timer{ - typedef std::chrono::high_resolution_clock high_resolution_clock; - typedef std::chrono::nanoseconds nanoseconds; - -public: - explicit timer(bool run = false) - { if (run) start(); } - - void start() - { _start = high_resolution_clock::now(); } - - nanoseconds get() const - { return std::chrono::duration_cast(high_resolution_clock::now() - _start); } - -private: - high_resolution_clock::time_point _start; -}; - -inline double bench(std::function const & op, driver::stream * stream, size_t warmup = 10, size_t repeat = 200) -{ - timer tmr; - std::vector times; - double total_time = 0; - for(size_t i = 0; i < warmup; i++) - op(); - stream->synchronize(); - tmr.start(); - for(size_t i = 0; i < repeat; i++){ - op(); - } - stream->synchronize(); - return (float)tmr.get().count() / repeat; - -// return *std::min_element(times.begin(), times.end()); -} - -} -} - -#endif diff --git a/include/triton/tools/graph.h b/include/triton/tools/graph.h deleted file mode 100644 index 69afd5bb3e66..000000000000 --- a/include/triton/tools/graph.h +++ /dev/null @@ -1,70 +0,0 @@ -#pragma once - -#ifndef _TRITON_TOOLS_THREAD_GRAPH_H_ -#define _TRITON_TOOLS_THREAD_GRAPH_H_ - -#include "llvm/ADT/SetVector.h" - -#include -#include -#include - -namespace triton { -namespace tools{ - -template -class graph { - typedef std::map> edges_t; - -public: - typedef std::map> cmap_t; - typedef std::map nmap_t; - -private: - void connected_components_impl(node_t x, llvm::SetVector &nodes, - nmap_t* nmap, cmap_t* cmap, int id) const { - if(nmap) - (*nmap)[x] = id; - if(cmap) - (*cmap)[id].push_back(x); - if (nodes.count(x)) { - nodes.remove(x); - for(const node_t &y: edges_.at(x)) - connected_components_impl(y, nodes, nmap, cmap, id); - } - } - -public: - void connected_components(cmap_t *cmap, nmap_t *nmap) const { - if(cmap) - cmap->clear(); - if(nmap) - nmap->clear(); - llvm::SetVector nodes = nodes_; - unsigned id = 0; - while(!nodes.empty()){ - connected_components_impl(*nodes.begin(), nodes, nmap, cmap, id++); - } - } - - void add_edge(node_t x, node_t y) { - nodes_.insert(x); - nodes_.insert(y); - edges_[x].insert(y); - edges_[y].insert(x); - } - - void clear() { - nodes_.clear(); - edges_.clear(); - } - -private: - llvm::SetVector nodes_; - edges_t edges_; -}; - -} -} - -#endif diff --git a/include/triton/tools/sha1.hpp b/include/triton/tools/sha1.hpp deleted file mode 100644 index 630a3fd7719b..000000000000 --- a/include/triton/tools/sha1.hpp +++ /dev/null @@ -1,186 +0,0 @@ -/* - Copyright (c) 2011, Micael Hildenborg - All rights 
reserved. - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of Micael Hildenborg nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - THIS SOFTWARE IS PROVIDED BY Micael Hildenborg ''AS IS'' AND ANY - EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL Micael Hildenborg BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/* - Contributors: - Gustav - Several members in the gamedev.se forum. - Gregory Petrosyan - */ - -#ifndef _TRITON_TOOLS_SHA1_HPP_ -#define _TRITON_TOOLS_SHA1_HPP_ - -namespace sha1 -{ - namespace // local - { - // Rotate an integer value to left. - inline unsigned int rol(const unsigned int value, - const unsigned int steps) - { - return ((value << steps) | (value >> (32 - steps))); - } - - // Sets the first 16 integers in the buffert to zero. - // Used for clearing the W buffert. - inline void clearWBuffert(unsigned int* buffert) - { - for (int pos = 16; --pos >= 0;) - { - buffert[pos] = 0; - } - } - - inline void innerHash(unsigned int* result, unsigned int* w) - { - unsigned int a = result[0]; - unsigned int b = result[1]; - unsigned int c = result[2]; - unsigned int d = result[3]; - unsigned int e = result[4]; - - int round = 0; - - #define sha1macro(func,val) \ - { \ - const unsigned int t = rol(a, 5) + (func) + e + val + w[round]; \ - e = d; \ - d = c; \ - c = rol(b, 30); \ - b = a; \ - a = t; \ - } - - while (round < 16) - { - sha1macro((b & c) | (~b & d), 0x5a827999) - ++round; - } - while (round < 20) - { - w[round] = rol((w[round - 3] ^ w[round - 8] ^ w[round - 14] ^ w[round - 16]), 1); - sha1macro((b & c) | (~b & d), 0x5a827999) - ++round; - } - while (round < 40) - { - w[round] = rol((w[round - 3] ^ w[round - 8] ^ w[round - 14] ^ w[round - 16]), 1); - sha1macro(b ^ c ^ d, 0x6ed9eba1) - ++round; - } - while (round < 60) - { - w[round] = rol((w[round - 3] ^ w[round - 8] ^ w[round - 14] ^ w[round - 16]), 1); - sha1macro((b & c) | (b & d) | (c & d), 0x8f1bbcdc) - ++round; - } - while (round < 80) - { - w[round] = rol((w[round - 3] ^ w[round - 8] ^ w[round - 14] ^ w[round - 16]), 1); - sha1macro(b ^ c ^ d, 0xca62c1d6) - ++round; - } - - #undef sha1macro - - result[0] += a; - result[1] += b; - result[2] += c; - result[3] += d; - result[4] += e; - } - } // namespace - - inline void calc(const void* src, const int bytelength, unsigned char* hash) - { - // Init the result array. 
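Before the implementation details, a hedged usage sketch of this vendored helper; the buffer sizes follow the 20-byte digest and 40-character hex string produced below, and the message is of course made up.

// Usage sketch (not part of the original header):
//   unsigned char digest[20];
//   char hex[41];
//   std::string msg = "hello";
//   sha1::calc(msg.data(), (int)msg.size(), digest);
//   sha1::toHexString(digest, hex);  // hex now holds the 40-char lowercase digest
// Back to calc(): the five 32-bit words initialized below are the standard
// SHA-1 initial state.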
- unsigned int result[5] = { 0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, 0xc3d2e1f0 }; - - // Cast the void src pointer to be the byte array we can work with. - const unsigned char* sarray = (const unsigned char*) src; - - // The reusable round buffer - unsigned int w[80]; - - // Loop through all complete 64byte blocks. - const int endOfFullBlocks = bytelength - 64; - int endCurrentBlock; - int currentBlock = 0; - - while (currentBlock <= endOfFullBlocks) - { - endCurrentBlock = currentBlock + 64; - - // Init the round buffer with the 64 byte block data. - for (int roundPos = 0; currentBlock < endCurrentBlock; currentBlock += 4) - { - // This line will swap endian on big endian and keep endian on little endian. - w[roundPos++] = (unsigned int) sarray[currentBlock + 3] - | (((unsigned int) sarray[currentBlock + 2]) << 8) - | (((unsigned int) sarray[currentBlock + 1]) << 16) - | (((unsigned int) sarray[currentBlock]) << 24); - } - innerHash(result, w); - } - - // Handle the last and not full 64 byte block if existing. - endCurrentBlock = bytelength - currentBlock; - clearWBuffert(w); - int lastBlockBytes = 0; - for (;lastBlockBytes < endCurrentBlock; ++lastBlockBytes) - { - w[lastBlockBytes >> 2] |= (unsigned int) sarray[lastBlockBytes + currentBlock] << ((3 - (lastBlockBytes & 3)) << 3); - } - w[lastBlockBytes >> 2] |= 0x80 << ((3 - (lastBlockBytes & 3)) << 3); - if (endCurrentBlock >= 56) - { - innerHash(result, w); - clearWBuffert(w); - } - w[15] = bytelength << 3; - innerHash(result, w); - - // Store hash in result pointer, and make sure we get in in the correct order on both endian models. - for (int hashByte = 20; --hashByte >= 0;) - { - hash[hashByte] = (result[hashByte >> 2] >> (((3 - hashByte) & 0x3) << 3)) & 0xff; - } - } - - inline void toHexString(const unsigned char* hash, char* hexstring) - { - const char hexDigits[] = { "0123456789abcdef" }; - - for (int hashByte = 20; --hashByte >= 0;) - { - hexstring[hashByte << 1] = hexDigits[(hash[hashByte] >> 4) & 0xf]; - hexstring[(hashByte << 1) + 1] = hexDigits[hash[hashByte] & 0xf]; - } - hexstring[40] = 0; - } -} // namespace sha1 - -#endif diff --git a/include/triton/tools/sys/exec.hpp b/include/triton/tools/sys/exec.hpp deleted file mode 100644 index 5b664553e9ea..000000000000 --- a/include/triton/tools/sys/exec.hpp +++ /dev/null @@ -1,46 +0,0 @@ -#ifndef TRITON_TOOLS_SYS_EXEC_HPP -#define TRITON_TOOLS_SYS_EXEC_HPP - -#include -#include -#include -#include -#include - -namespace triton -{ -namespace tools -{ - - -#ifdef _WIN32 -#define popen _popen -#define pclose _pclose -#endif - -#ifndef WEXITSTATUS -#define WEXITSTATUS(stat_val) ((unsigned)(stat_val) & 255) -#endif - -int exec(const std::string& cmd, std::string& result) { - char buffer[128]; - FILE* pipe = popen(cmd.c_str(), "r"); - if (!pipe) - return 0; - result.clear(); - try { - while (fgets(buffer, sizeof buffer, pipe) != NULL) - result += buffer; - } catch (...) { - pclose(pipe); - return 0; - } - int status = pclose(pipe); - return WEXITSTATUS(status); - -} - -} -} - -#endif diff --git a/include/triton/tools/sys/mkdir.hpp b/include/triton/tools/sys/mkdir.hpp deleted file mode 100755 index 5198a0098436..000000000000 --- a/include/triton/tools/sys/mkdir.hpp +++ /dev/null @@ -1,76 +0,0 @@ -/* - * Copyright (c) 2015, PHILIPPE TILLET. All rights reserved. - * - * This file is part of ISAAC. 
- * - * ISAAC is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, - * MA 02110-1301 USA - */ - -#ifndef TDL_TOOLS_SYS_MKDIR_HPP -#define TDL_TOOLS_SYS_MKDIR_HPP - -#include -#include -#include -#include -#include -#if defined(_WIN32) - #include -#endif - -namespace triton -{ - -namespace tools -{ - - inline int mkdir(std::string const & path) - { - #if defined(_WIN32) - return _mkdir(path.c_str()); - #else - return ::mkdir(path.c_str(), 0777); - #endif - } - - inline int mkpath(std::string const & path) - { - int status = 0; - size_t pp = 0; - size_t sp; - while ((sp = path.find('/', pp)) != std::string::npos) - { - if (sp != pp){ - status = mkdir(path.substr(0, sp)); - } - pp = sp + 1; - } - return (status==0 || errno==EEXIST)?0:-1; - } - - inline int mtime(std::string const & path) - { - struct stat st; - if(stat(path.c_str(), &st) != 0) - return 0; - return st.st_mtime; - } - -} - -} - -#endif diff --git a/include/triton/tools/thread_pool.h b/include/triton/tools/thread_pool.h deleted file mode 100644 index fbcf2b6846f2..000000000000 --- a/include/triton/tools/thread_pool.h +++ /dev/null @@ -1,90 +0,0 @@ -#pragma once - -#ifndef _TRITON_TOOLS_THREAD_POOL_H_ -#define _TRITON_TOOLS_THREAD_POOL_H_ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -class ThreadPool { -public: - ThreadPool(size_t threads) - : stop(false) { - for(size_t i = 0;i < threads;++i) - workers.emplace_back( - [this] { - for(;;){ - std::function task; - { - std::unique_lock lock(this->queue_mutex); - this->condition.wait(lock, - [this]{ return this->stop || !this->tasks.empty(); }); - if(this->stop && this->tasks.empty()) - return; - task = std::move(this->tasks.front()); - this->tasks.pop(); - } - task(); - } - } - ); - } - - - template - auto enqueue(F&& f, Args&&... args) - -> std::future::type> - { - using return_type = typename std::result_of::type; - - auto task = std::make_shared< std::packaged_task >( - std::bind(std::forward(f), std::forward(args)...) 
- ); - - std::future res = task->get_future(); - { - std::unique_lock lock(queue_mutex); - - // don't allow enqueueing after stopping the pool - if(stop) - throw std::runtime_error("enqueue on stopped ThreadPool"); - - tasks.emplace([task](){ (*task)(); }); - } - condition.notify_one(); - return res; - } - - - ~ThreadPool() { - { - std::unique_lock lock(queue_mutex); - stop = true; - } - condition.notify_all(); - for(std::thread &worker: workers) - worker.join(); - } - - -private: - // need to keep track of threads so we can join them - std::vector< std::thread > workers; - // the task queue - std::queue< std::function > tasks; - - // synchronization - std::mutex queue_mutex; - std::condition_variable condition; - bool stop; -}; - - -#endif diff --git a/lib/Analysis/Alias.cpp b/lib/Analysis/Alias.cpp new file mode 100644 index 000000000000..db01e6fc3113 --- /dev/null +++ b/lib/Analysis/Alias.cpp @@ -0,0 +1,67 @@ +#include "triton/Analysis/Alias.h" +#include "mlir/Dialect/Tensor/IR/Tensor.h" +#include "triton/Analysis/Utility.h" +#include "triton/Dialect/TritonGPU/IR/Dialect.h" + +namespace mlir { + +AliasInfo AliasInfo::join(const AliasInfo &lhs, const AliasInfo &rhs) { + if (lhs == rhs) + return lhs; + AliasInfo ret; + for (auto value : lhs.allocs) { + ret.insert(value); + } + for (auto value : rhs.allocs) { + ret.insert(value); + } + return ret; +} + +ChangeResult SharedMemoryAliasAnalysis::visitOperation( + Operation *op, ArrayRef *> operands) { + AliasInfo aliasInfo; + bool pessimistic = true; + if (maybeSharedAllocationOp(op)) { + // These ops may allocate a new shared memory buffer. + auto result = op->getResult(0); + // FIXME(Keren): extract and insert are always alias for now + if (isa(op)) { + // extract_slice %src + aliasInfo = AliasInfo(operands[0]->getValue()); + pessimistic = false; + } else if (isa(op) || + isa(op)) { + // insert_slice_async %src, %dst, %index + // insert_slice %src into %dst[%offsets] + aliasInfo = AliasInfo(operands[1]->getValue()); + pessimistic = false; + } else if (isSharedEncoding(result)) { + aliasInfo.insert(result); + pessimistic = false; + } + } + + if (pessimistic) { + return markAllPessimisticFixpoint(op->getResults()); + } + // Join all lattice elements + ChangeResult result = ChangeResult::NoChange; + for (Value value : op->getResults()) { + result |= getLatticeElement(value).join(aliasInfo); + } + return result; +} + +AliasResult SharedMemoryAliasAnalysis::alias(Value lhs, Value rhs) { + // TODO: implement + return AliasResult::MayAlias; +} + +ModRefResult SharedMemoryAliasAnalysis::getModRef(Operation *op, + Value location) { + // TODO: implement + return ModRefResult::getModAndRef(); +} + +} // namespace mlir diff --git a/lib/Analysis/Allocation.cpp b/lib/Analysis/Allocation.cpp new file mode 100644 index 000000000000..aad43241c16f --- /dev/null +++ b/lib/Analysis/Allocation.cpp @@ -0,0 +1,476 @@ +#include "triton/Analysis/Allocation.h" +#include "mlir/Analysis/Liveness.h" +#include "mlir/Analysis/SliceAnalysis.h" +#include "mlir/Dialect/Tensor/IR/Tensor.h" +#include "triton/Analysis/Alias.h" +#include "triton/Analysis/Utility.h" +#include "triton/Dialect/TritonGPU/IR/Dialect.h" +#include "llvm/ADT/SmallVector.h" + +#include +#include +#include + +using ::mlir::triton::gpu::BlockedEncodingAttr; +using ::mlir::triton::gpu::DotOperandEncodingAttr; +using ::mlir::triton::gpu::getContigPerThread; +using ::mlir::triton::gpu::getOrder; +using ::mlir::triton::gpu::getShapePerCTA; +using ::mlir::triton::gpu::getSizePerThread; +using 
::mlir::triton::gpu::MmaEncodingAttr; +using ::mlir::triton::gpu::SharedEncodingAttr; +using ::mlir::triton::gpu::SliceEncodingAttr; + +namespace mlir { + +//===----------------------------------------------------------------------===// +// Shared Memory Allocation Analysis +//===----------------------------------------------------------------------===// +namespace triton { + +// Bitwidth of pointers +constexpr int kPtrBitWidth = 64; + +static std::pair, SmallVector> +getCvtOrder(const Attribute &srcLayout, const Attribute &dstLayout) { + auto srcBlockedLayout = srcLayout.dyn_cast(); + auto srcMmaLayout = srcLayout.dyn_cast(); + auto srcDotLayout = srcLayout.dyn_cast(); + auto dstBlockedLayout = dstLayout.dyn_cast(); + auto dstMmaLayout = dstLayout.dyn_cast(); + auto dstDotLayout = dstLayout.dyn_cast(); + assert(!(srcMmaLayout && dstMmaLayout) && + "Unexpected mma -> mma layout conversion"); + // mma or dot layout does not have an order, so the order depends on the + // layout of the other operand. + auto inOrd = (srcMmaLayout || srcDotLayout) ? getOrder(dstLayout) + : getOrder(srcLayout); + auto outOrd = (dstMmaLayout || dstDotLayout) ? getOrder(srcLayout) + : getOrder(dstLayout); + + return {inOrd, outOrd}; +} + +SmallVector +getScratchConfigForCvtLayout(triton::gpu::ConvertLayoutOp op, unsigned &inVec, + unsigned &outVec) { + auto srcTy = op.src().getType().cast(); + auto dstTy = op.result().getType().cast(); + Attribute srcLayout = srcTy.getEncoding(); + Attribute dstLayout = dstTy.getEncoding(); + assert(srcLayout && dstLayout && + "Unexpect layout in getScratchConfigForCvtLayout()"); + auto [inOrd, outOrd] = getCvtOrder(srcLayout, dstLayout); + unsigned srcContigPerThread = getContigPerThread(srcLayout)[inOrd[0]]; + unsigned dstContigPerThread = getContigPerThread(dstLayout)[outOrd[0]]; + // TODO: Fix the legacy issue that ourOrd[0] == 0 always means + // that we cannot do vectorization. + inVec = outOrd[0] == 0 ? 1 : inOrd[0] == 0 ? 1 : srcContigPerThread; + outVec = outOrd[0] == 0 ? 1 : dstContigPerThread; + + auto srcShapePerCTA = getShapePerCTA(srcLayout); + auto dstShapePerCTA = getShapePerCTA(dstLayout); + + unsigned rank = dstTy.getRank(); + SmallVector paddedRepShape(rank); + unsigned pad = std::max(inVec, outVec); + for (unsigned d = 0; d < rank; ++d) { + paddedRepShape[d] = + std::max(std::min(srcTy.getShape()[d], srcShapePerCTA[d]), + std::min(dstTy.getShape()[d], dstShapePerCTA[d])); + } + if (rank == 1) + return paddedRepShape; + unsigned paddedDim = 1; + if (auto dstBlockedLayout = dstLayout.dyn_cast()) { + paddedDim = dstBlockedLayout.getOrder()[0]; + } + paddedRepShape[paddedDim] += pad; + return paddedRepShape; +} + +// TODO: extend beyond scalars +SmallVector getScratchConfigForAtomicRMW(triton::AtomicRMWOp op) { + SmallVector smemShape; + if (op.ptr().getType().isa()) { + // do nothing or just assert because shared memory is not used in tensor up + // to now + } else { + // need only bytes for scalar + // always vec = 1 and elemsPerThread = 1 for scalar? + smemShape.push_back(1); + } + return smemShape; +} + +SmallVector getScratchConfigForAtomicCAS(triton::AtomicCASOp op) { + return SmallVector{1}; +} + +class AllocationAnalysis { +public: + AllocationAnalysis(Operation *operation, Allocation *allocation) + : operation(operation), allocation(allocation) { + run(); + } + +private: + using BufferT = Allocation::BufferT; + + /// Value -> Liveness Range + /// Use MapVector to ensure determinism. 
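The liveness bookkeeping introduced here is easier to follow with a small invented example; the operation numbers below are hypothetical.

// Illustration: with operations numbered 0..N in program order, a buffer
// whose value is live from op 2 through op 5 is recorded as the half-open
// interval [2, 6).  Buffers whose liveness intervals do not intersect
// (e.g. [2, 6) and [7, 9)) may later be placed at overlapping shared-memory
// offsets, while buffers with intersecting intervals must never overlap;
// the map aliased below associates each buffer with such an interval.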
+ using BufferRangeMapT = llvm::MapVector>; + /// Nodes -> Nodes + using GraphT = DenseMap>; + + void run() { + getValuesAndSizes(); + resolveLiveness(); + computeOffsets(); + } + + /// Initializes explicitly defined shared memory values for a given operation. + void getExplicitValueSize(Operation *op) { + // Values returned from scf.yield will not be allocated even though they + // have the shared encoding. + // For example: %a = scf.if -> yield + // %a must be allocated elsewhere by other operations. + // FIXME(Keren): extract and insert are always alias for now + if (!maybeSharedAllocationOp(op) || maybeAliasOp(op)) { + return; + } + + for (Value result : op->getResults()) { + if (isSharedEncoding(result)) { + // Bytes could be a different value once we support padding or other + // allocation policies. + auto tensorType = result.getType().dyn_cast(); + auto bytes = tensorType.getNumElements() * + tensorType.getElementTypeBitWidth() / 8; + allocation->addBuffer(result, bytes); + } + } + } + + /// Initializes temporary shared memory for a given operation. + void getScratchValueSize(Operation *op) { + if (auto reduceOp = dyn_cast(op)) { + ReduceOpHelper helper(reduceOp); + unsigned bytes = helper.getScratchSizeInBytes(); + allocation->addBuffer(op, bytes); + } else if (auto cvtLayout = dyn_cast(op)) { + auto srcTy = cvtLayout.src().getType().cast(); + auto dstTy = cvtLayout.result().getType().cast(); + auto srcEncoding = srcTy.getEncoding(); + auto dstEncoding = dstTy.getEncoding(); + if (srcEncoding.isa() || + dstEncoding.isa()) { + // Conversions from/to shared memory do not need scratch memory. + return; + } + // ConvertLayoutOp with both input/output non-shared_layout + // TODO: Besides of implementing ConvertLayoutOp via shared memory, it's + // also possible to realize it with other approaches in restricted + // conditions, such as warp-shuffle + unsigned inVec = 0; + unsigned outVec = 0; + auto smemShape = getScratchConfigForCvtLayout(cvtLayout, inVec, outVec); + unsigned elems = std::accumulate(smemShape.begin(), smemShape.end(), 1, + std::multiplies{}); + auto bytes = + srcTy.getElementType().isa() + ? elems * kPtrBitWidth / 8 + : elems * std::max(8, srcTy.getElementTypeBitWidth()) / 8; + allocation->addBuffer(op, bytes); + } else if (auto atomicRMWOp = dyn_cast(op)) { + auto value = op->getOperand(0); + // only scalar requires scratch memory + // make it explicit for readability + if (value.getType().dyn_cast()) { + // nothing to do + } else { + auto smemShape = getScratchConfigForAtomicRMW(atomicRMWOp); + unsigned elems = std::accumulate(smemShape.begin(), smemShape.end(), 1, + std::multiplies{}); + auto elemTy = + value.getType().cast().getPointeeType(); + auto bytes = + elemTy.isa() + ? elems * kPtrBitWidth / 8 + : elems * std::max(8, elemTy.getIntOrFloatBitWidth()) / 8; + allocation->addBuffer(op, bytes); + } + } else if (auto atomicCASOp = dyn_cast(op)) { + auto value = op->getOperand(0); + auto smemShape = getScratchConfigForAtomicCAS(atomicCASOp); + unsigned elems = std::accumulate(smemShape.begin(), smemShape.end(), 1, + std::multiplies{}); + auto elemTy = + value.getType().cast().getPointeeType(); + auto bytes = elemTy.isa() + ? 
elems * kPtrBitWidth / 8 + : elems * elemTy.getIntOrFloatBitWidth() / 8; + allocation->addBuffer(op, bytes); + } + } + + void getValueAlias(Value value, SharedMemoryAliasAnalysis &analysis) { + LatticeElement *latticeElement = + analysis.lookupLatticeElement(value); + if (latticeElement) { + auto &info = latticeElement->getValue(); + if (!info.getAllocs().empty()) { + for (auto alloc : info.getAllocs()) { + allocation->addAlias(value, alloc); + } + } + } + } + + /// Extract all shared memory values and their sizes + void getValuesAndSizes() { + // Get the alloc values + operation->walk([&](Operation *op) { + getExplicitValueSize(op); + getScratchValueSize(op); + }); + // Get the alias values + SharedMemoryAliasAnalysis aliasAnalysis(operation->getContext()); + aliasAnalysis.run(operation); + operation->walk([&](Operation *op) { + for (auto operand : op->getOperands()) { + getValueAlias(operand, aliasAnalysis); + } + for (auto value : op->getResults()) { + getValueAlias(value, aliasAnalysis); + } + }); + } + + /// Computes the liveness range of the allocated value. + /// Each buffer is allocated only once. + void resolveExplicitBufferLiveness( + function_ref(Value value)> getLiveness) { + for (auto valueBufferIter : allocation->valueBuffer) { + auto value = valueBufferIter.first; + auto *buffer = valueBufferIter.second; + bufferRange[buffer] = getLiveness(value); + } + } + + /// Extends the liveness range by unionizing the liveness range of the aliased + /// values because each allocated buffer could be an alias of others, if block + /// arguments are involved. + void resolveAliasBufferLiveness( + function_ref(Value value)> getLiveness) { + for (auto aliasBufferIter : allocation->aliasBuffer) { + auto value = aliasBufferIter.first; + auto buffers = aliasBufferIter.second; + auto range = getLiveness(value); + for (auto *buffer : buffers) { + auto minId = range.start(); + auto maxId = range.end(); + if (bufferRange.count(buffer)) { + // Extend the allocated buffer's range + minId = std::min(minId, bufferRange[buffer].start()); + maxId = std::max(maxId, bufferRange[buffer].end()); + } + bufferRange[buffer] = Interval(minId, maxId); + } + } + } + + /// Computes the liveness range of scratched buffers. + /// Some operations may have a temporary buffer that is not explicitly + /// allocated, but is used to store intermediate results. + void resolveScratchBufferLiveness( + const DenseMap &operationId) { + // Analyze liveness of scratch buffers + for (auto opScratchIter : allocation->opScratch) { + // Any scratch memory's live range is the current operation's live + // range. + auto *op = opScratchIter.first; + auto *buffer = opScratchIter.second; + bufferRange.insert({buffer, Interval(operationId.lookup(op), + operationId.lookup(op) + 1)}); + } + } + + /// Resolves liveness of all values involved under the root operation. 
+ void resolveLiveness() { + // In the SCF dialect, we always have a sequentially nested structure of + // blocks + DenseMap operationId; + operation->walk( + [&](Operation *op) { operationId[op] = operationId.size(); }); + + // Analyze liveness of explicit buffers + Liveness liveness(operation); + auto getValueLivenessRange = [&](Value value) { + auto liveOperations = liveness.resolveLiveness(value); + auto minId = std::numeric_limits::max(); + auto maxId = std::numeric_limits::min(); + std::for_each(liveOperations.begin(), liveOperations.end(), + [&](Operation *liveOp) { + if (operationId[liveOp] < minId) { + minId = operationId[liveOp]; + } + if ((operationId[liveOp] + 1) > maxId) { + maxId = operationId[liveOp] + 1; + } + }); + return Interval(minId, maxId); + }; + + resolveExplicitBufferLiveness(getValueLivenessRange); + resolveAliasBufferLiveness(getValueLivenessRange); + resolveScratchBufferLiveness(operationId); + } + + /// Computes the shared memory offsets for all related values. + /// Paper: Algorithms for Compile-Time Memory Optimization + /// (https://www.cs.utexas.edu/users/harrison/papers/compile-time.pdf) + void computeOffsets() { + SmallVector buffers; + for (auto bufferIter : bufferRange) { + buffers.emplace_back(bufferIter.first); + } + + DenseMap bufferStart; + calculateStarts(buffers, bufferStart); + + GraphT interference; + buildInterferenceGraph(buffers, bufferStart, interference); + + allocate(buffers, bufferStart, interference); + } + + /// Computes the initial shared memory offsets. + void calculateStarts(const SmallVector &buffers, + DenseMap &bufferStart) { + // v = values in shared memory + // t = triplet of (size, start, end) + // shared memory space + // - + // | *******t4 + // | /|\ v2 inserts t4, t5, and t6 + // | | + // | ******t5 ************t6 + // | ^^^^^v2^^^^^^ + // | | *********************t2 + // | \|/ v2 erases t1 + // | ******t1 ^^^^^^^^^v1^^^^^^^^^ ************t3 + // |---------------------------------------------| liveness range + // 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 ... + /// Start -> Liveness Range + using TripleMapT = std::multimap>; + TripleMapT tripleMap; + tripleMap.insert(std::make_pair(0, Interval())); + SmallVector xBuffers = buffers; + while (!xBuffers.empty()) { + auto tripleIt = tripleMap.begin(); + auto size = tripleIt->first; + auto range = tripleIt->second; + tripleMap.erase(tripleIt); + auto bufferIt = + std::find_if(xBuffers.begin(), xBuffers.end(), [&](auto *buffer) { + auto xRange = bufferRange[buffer]; + bool res = xRange.intersects(range); + for (auto val : tripleMap) + res = res && !val.second.intersects(xRange); + return res; + }); + if (bufferIt != xBuffers.end()) { + auto buffer = *bufferIt; + auto xSize = buffer->size; + auto xRange = bufferRange.lookup(buffer); + bufferStart[buffer] = size; + tripleMap.insert( + {size + xSize, Interval{std::max(range.start(), xRange.start()), + std::min(range.end(), xRange.end())}}); + if (range.start() < xRange.start()) + tripleMap.insert({size, Interval{range.start(), xRange.end()}}); + if (xRange.end() < range.end()) + tripleMap.insert({size, Interval{xRange.start(), range.end()}}); + xBuffers.erase(bufferIt); + } + } + } + + /// Builds a graph of all shared memory values. Edges are created between + /// shared memory values that are overlapping. 
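A hedged numerical example of the interference test implemented below; all offsets, sizes, and ranges are invented.

// Example: buffer X has tentative offset 0, size 512, and liveness [2, 6);
// buffer Y has tentative offset 256, size 512, and liveness [4, 9).  The
// offset ranges [0, 512) and [256, 768) intersect and the liveness
// intervals intersect, so an edge X <-> Y is added and allocate() has to
// keep the two buffers apart.  If Y were instead live only in [7, 9), no
// edge would be added and the two buffers could share memory.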
+ void buildInterferenceGraph(const SmallVector &buffers, + const DenseMap &bufferStart, + GraphT &interference) { + for (auto x : buffers) { + for (auto y : buffers) { + if (x == y) + continue; + auto xStart = bufferStart.lookup(x); + auto yStart = bufferStart.lookup(y); + auto xSize = x->size; + auto ySize = y->size; + Interval xSizeRange = {xStart, xStart + xSize}; + Interval ySizeRange = {yStart, yStart + ySize}; + auto xOpRange = bufferRange.lookup(x); + auto yOpRange = bufferRange.lookup(y); + if (xOpRange.intersects(yOpRange) && + xSizeRange.intersects(ySizeRange)) { + interference[x].insert(y); + } + } + } + } + + /// Finalizes shared memory offsets considering interference. + void allocate(const SmallVector &buffers, + const DenseMap &bufferStart, + const GraphT &interference) { + // First-fit graph coloring + // Neighbors are nodes that interfere with each other. + // We color a node by finding the index of the first available + // non-neighboring node or the first neighboring node without any color. + // Nodes with the same color do not interfere with each other. + DenseMap colors; + for (auto value : buffers) { + colors[value] = (value == buffers[0]) ? 0 : -1; + } + SmallVector available(buffers.size()); + for (auto x : buffers) { + std::fill(available.begin(), available.end(), true); + for (auto y : interference.lookup(x)) { + int color = colors[y]; + if (color >= 0) { + available[color] = false; + } + } + auto it = std::find(available.begin(), available.end(), true); + colors[x] = std::distance(available.begin(), it); + } + // Finalize allocation + // color0: [0, 7), [0, 8), [0, 15) -> [0, 7), [0, 8), [0, 15) + // color1: [7, 9) -> [0 + 1 * 15, 9 + 1 * 15) -> [15, 24) + // color2: [8, 12) -> [8 + 2 * 15, 12 + 2 * 15) -> [38, 42) + // TODO(Keren): We are wasting memory here. + // Nodes with color2 can actually start with 24. 
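For completeness, a small invented example of the first-fit coloring used here.

// Example: buffers b0, b1, b2 where b0 interferes with b1 and b1 with b2,
// but b0 does not interfere with b2.  First-fit assigns color 0 to b0,
// color 1 to b1 (its neighbor b0 already holds 0), and color 0 to b2 (its
// only neighbor b1 holds 1).  b0 and b2 share a color and may therefore be
// given overlapping offsets, while b1 is shifted past both of its
// neighbors in the loop below.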
+ for (auto x : buffers) { + size_t adj = 0; + for (auto y : interference.lookup(x)) { + adj = std::max(adj, bufferStart.lookup(y) + y->size); + } + x->offset = bufferStart.lookup(x) + colors.lookup(x) * adj; + allocation->sharedMemorySize = + std::max(allocation->sharedMemorySize, x->offset + x->size); + } + } + +private: + Operation *operation; + Allocation *allocation; + BufferRangeMapT bufferRange; +}; +} // namespace triton + +void Allocation::run() { triton::AllocationAnalysis(getOperation(), this); } + +} // namespace mlir diff --git a/lib/Analysis/AxisInfo.cpp b/lib/Analysis/AxisInfo.cpp new file mode 100644 index 000000000000..42394c3a392e --- /dev/null +++ b/lib/Analysis/AxisInfo.cpp @@ -0,0 +1,321 @@ +#include "mlir/Analysis/DataFlowAnalysis.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "llvm/Support/raw_ostream.h" +#include + +#include "triton/Analysis/AxisInfo.h" +#include "triton/Dialect/Triton/IR/Dialect.h" +#include "triton/Dialect/TritonGPU/IR/Dialect.h" + +namespace mlir { + +//===----------------------------------------------------------------------===// +// AxisInfo +//===----------------------------------------------------------------------===// + +// Function for extended Euclidean Algorithm +static int gcd_impl(int a, int b, int *x, int *y) { + // Base Case + if (a == 0) { + *x = 0; + *y = 1; + return b; + } + int x1, y1; // To store results of recursive call + int gcd = gcd_impl(b % a, a, &x1, &y1); + // Update x and y using results of + // recursive call + *x = y1 - (b / a) * x1; + *y = x1; + return gcd; +} + +static int gcd(int a, int b) { + int x, y; + return gcd_impl(a, b, &x, &y); +} + +AxisInfo AxisInfo::getPessimisticValueState(Value value) { + size_t rank = 1; + if (TensorType ty = value.getType().dyn_cast()) + rank = ty.getRank(); + int divHint = 1; + BlockArgument blockArg = value.dyn_cast(); + if (blockArg && blockArg.getOwner()->isEntryBlock()) { + Operation *op = blockArg.getOwner()->getParentOp(); + if (FuncOp fun = dyn_cast(op)) { + Attribute attr = + fun.getArgAttr(blockArg.getArgNumber(), "tt.divisibility"); + if (attr) + divHint = attr.cast().getValue().getZExtValue(); + } else if (auto fun = dyn_cast(op)) { + Attribute attr = + fun.getArgAttr(blockArg.getArgNumber(), "tt.divisibility"); + if (attr) + divHint = attr.cast().getValue().getZExtValue(); + } + } + DimVectorT contiguity(rank, 1); + DimVectorT divisibility(rank, divHint); + DimVectorT constancy(rank, 1); + return AxisInfo(contiguity, divisibility, constancy); +} + +// The gcd of both arguments for each dimension +AxisInfo AxisInfo::join(const AxisInfo &lhs, const AxisInfo &rhs) { + DimVectorT retContiguity; + DimVectorT retDivisibility; + DimVectorT retConstancy; + for (int d = 0; d < lhs.getRank(); ++d) { + retContiguity.push_back(gcd(lhs.getContiguity(d), rhs.getContiguity(d))); + retDivisibility.push_back( + gcd(lhs.getDivisibility(d), rhs.getDivisibility(d))); + retConstancy.push_back(gcd(lhs.getConstancy(d), rhs.getConstancy(d))); + } + return AxisInfo(retContiguity, retDivisibility, retConstancy); +} + +//===----------------------------------------------------------------------===// +// AxisInfoAnalysis +//===----------------------------------------------------------------------===// + +AxisInfo AxisInfoAnalysis::visitBinaryOp( + Operation *op, AxisInfo lhsInfo, AxisInfo rhsInfo, + const std::function &getContiguity, + const std::function &getDivisibility, + const std::function &getConstancy) { + int rank = lhsInfo.getRank(); + AxisInfo::DimVectorT newContiguity; + 
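As a quick reference, a worked example of the gcd-based AxisInfo::join defined earlier in this file; the lattice values are invented.

// Example: joining {contiguity = [4], divisibility = [16], constancy = [1]}
// with {contiguity = [6], divisibility = [8], constancy = [2]} yields
// {contiguity = [2], divisibility = [8], constancy = [1]}, each entry being
// the gcd of the corresponding pair.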
AxisInfo::DimVectorT newDivisibility; + AxisInfo::DimVectorT newConstancy; + for (int d = 0; d < rank; ++d) { + newContiguity.push_back(getContiguity(lhsInfo, rhsInfo, d)); + newDivisibility.push_back(getDivisibility(lhsInfo, rhsInfo, d)); + newConstancy.push_back(getConstancy(lhsInfo, rhsInfo, d)); + } + return AxisInfo(newContiguity, newDivisibility, newConstancy); +} + +ChangeResult AxisInfoAnalysis::visitOperation( + Operation *op, ArrayRef *> operands) { + AxisInfo curr; + // This preserves the input axes (e.g., cast): + if (llvm::isa(op)) + curr = operands[0]->getValue(); + // Constant ranges + if (triton::MakeRangeOp make_range = + llvm::dyn_cast(op)) { + int start = make_range.start(); + int end = make_range.end(); + AxisInfo::DimVectorT contiguity = {end - start}; + AxisInfo::DimVectorT divisibility = {highestPowOf2Divisor(start)}; + AxisInfo::DimVectorT constancy = {1}; + curr = AxisInfo(contiguity, divisibility, constancy); + } + // Constant + if (arith::ConstantOp constant = llvm::dyn_cast(op)) { + auto intAttr = constant.getValue().dyn_cast(); + if (intAttr) { + size_t val = intAttr.getValue().getZExtValue(); + curr = AxisInfo({1}, {highestPowOf2Divisor(val)}, {1}); + } + // TODO: generalize to dense attr + auto splatAttr = constant.getValue().dyn_cast(); + if (splatAttr && splatAttr.getElementType().isInteger(32)) { + auto value = splatAttr.getSplatValue(); + TensorType ty = splatAttr.getType().cast(); + curr = AxisInfo( + AxisInfo::DimVectorT(ty.getRank(), 1), + AxisInfo::DimVectorT(ty.getRank(), highestPowOf2Divisor(value)), + AxisInfo::DimVectorT(ty.getShape().begin(), ty.getShape().end())); + } + } + // TODO: refactor & complete binary ops + // Addition + if (llvm::isa(op)) { + auto newContiguity = [&](AxisInfo lhs, AxisInfo rhs, int d) { + return std::max(gcd(lhs.getContiguity(d), rhs.getConstancy(d)), + gcd(lhs.getConstancy(d), rhs.getContiguity(d))); + }; + auto newConstancy = [&](AxisInfo lhs, AxisInfo rhs, int d) { + return gcd(lhs.getConstancy(d), rhs.getConstancy(d)); + }; + auto newDivisibility = [&](AxisInfo lhs, AxisInfo rhs, int d) { + return gcd(lhs.getDivisibility(d), rhs.getDivisibility(d)); + }; + curr = visitBinaryOp(op, operands[0]->getValue(), operands[1]->getValue(), + newContiguity, newDivisibility, newConstancy); + } + // Multiplication + if (llvm::isa(op)) { + auto newContiguity = [](AxisInfo lhs, AxisInfo rhs, int d) { return 1; }; + auto newConstancy = [](AxisInfo lhs, AxisInfo rhs, int d) { + return gcd(lhs.getConstancy(d), rhs.getConstancy(d)); + }; + auto newDivisibility = [](AxisInfo lhs, AxisInfo rhs, int d) { + return lhs.getDivisibility(d) * rhs.getDivisibility(d); + }; + curr = visitBinaryOp(op, operands[0]->getValue(), operands[1]->getValue(), + newContiguity, newDivisibility, newConstancy); + } + // Remainder + if (llvm::isa(op)) { + auto newContiguity = [](AxisInfo lhs, AxisInfo rhs, int d) { + return gcd(lhs.getContiguity(d), rhs.getDivisibility(d)); + }; + auto newDivisibility = [](AxisInfo lhs, AxisInfo rhs, int d) { + return gcd(lhs.getDivisibility(d), rhs.getDivisibility(d)); + }; + auto newConstancy = [](AxisInfo lhs, AxisInfo rhs, int d) { + return gcd(lhs.getConstancy(d), rhs.getConstancy(d)); + }; + curr = visitBinaryOp(op, operands[0]->getValue(), operands[1]->getValue(), + newContiguity, newDivisibility, newConstancy); + } + // TODO: All other binary ops + if (llvm::isa(op)) { + auto newContiguity = [](AxisInfo lhs, AxisInfo rhs, int d) { return 1; }; + auto newDivisibility = [](AxisInfo lhs, AxisInfo rhs, int d) { return 1; 
}; + auto newConstancy = [](AxisInfo lhs, AxisInfo rhs, int d) { + return gcd(lhs.getConstancy(d), rhs.getConstancy(d)); + }; + curr = visitBinaryOp(op, operands[0]->getValue(), operands[1]->getValue(), + newContiguity, newDivisibility, newConstancy); + } + // Splat + if (llvm::isa(op)) { + Type _retTy = *op->result_type_begin(); + TensorType retTy = _retTy.cast(); + AxisInfo opInfo = operands[0]->getValue(); + AxisInfo::DimVectorT contiguity; + AxisInfo::DimVectorT divisibility; + AxisInfo::DimVectorT constancy; + for (int d = 0; d < retTy.getRank(); ++d) { + contiguity.push_back(1); + divisibility.push_back(opInfo.getDivisibility(0)); + constancy.push_back(retTy.getShape()[d]); + } + curr = AxisInfo(contiguity, divisibility, constancy); + } + // expandDims + if (auto expandDims = llvm::dyn_cast(op)) { + AxisInfo opInfo = operands[0]->getValue(); + AxisInfo::DimVectorT contiguity = opInfo.getContiguity(); + AxisInfo::DimVectorT divisibility = opInfo.getDivisibility(); + AxisInfo::DimVectorT constancy = opInfo.getConstancy(); + contiguity.insert(contiguity.begin() + expandDims.axis(), 1); + divisibility.insert(divisibility.begin() + expandDims.axis(), 1); + constancy.insert(constancy.begin() + expandDims.axis(), 1); + curr = AxisInfo(contiguity, divisibility, constancy); + } + // Broadcast + if (llvm::isa(op)) { + Type _retTy = *op->result_type_begin(); + Type _opTy = *op->operand_type_begin(); + TensorType retTy = _retTy.cast(); + TensorType opTy = _opTy.cast(); + ArrayRef retShape = retTy.getShape(); + ArrayRef opShape = opTy.getShape(); + AxisInfo opInfo = operands[0]->getValue(); + AxisInfo::DimVectorT contiguity; + AxisInfo::DimVectorT divisibility; + AxisInfo::DimVectorT constancy; + for (int d = 0; d < retTy.getRank(); ++d) { + contiguity.push_back(opShape[d] == 1 ? 1 : opInfo.getContiguity(d)); + divisibility.push_back(opInfo.getDivisibility(d)); + constancy.push_back(opShape[d] == 1 ? 
retShape[d] + : opInfo.getConstancy(d)); + } + curr = AxisInfo(contiguity, divisibility, constancy); + } + + // CmpI + if ((llvm::dyn_cast(op) || + llvm::dyn_cast(op)) && + op->getResult(0).getType().dyn_cast()) { + auto resTy = op->getResult(0).getType().cast(); + short rank = resTy.getRank(); + auto lhsInfo = operands[0]->getValue(); + auto rhsInfo = operands[1]->getValue(); + auto shape = resTy.getShape(); + + AxisInfo::DimVectorT contiguity, divisibility, constancy; + for (short d = 0; d < rank; ++d) { + if (rhsInfo.getConstancy(d) % lhsInfo.getContiguity(d) == 0 || + rhsInfo.getConstancy(d) % lhsInfo.getConstancy(d)) + constancy.push_back( + gcd(lhsInfo.getDivisibility(d), rhsInfo.getDivisibility(d))); + else + constancy.push_back(1); + + divisibility.push_back(shape[d]); + contiguity.push_back(1); + } + + curr = AxisInfo(contiguity, divisibility, constancy); + } + + // UnrealizedConversionCast + // This is needed by TritonGPUToLLVM, to get AxisInfo when the graph is + // in the process of a PartialConversion, where UnrealizedConversionCast + // may exist + if (llvm::isa(op)) { + curr = operands[0]->getValue(); + } + if (curr.getRank() == 0) { + return markAllPessimisticFixpoint(op->getResults()); + } + + // join all lattice elements + ChangeResult result = ChangeResult::NoChange; + for (Value value : op->getResults()) { + result |= getLatticeElement(value).join(curr); + } + return result; +} + +unsigned AxisInfoAnalysis::getPtrVectorSize(Value ptr) { + auto tensorTy = ptr.getType().dyn_cast(); + if (!tensorTy) + return 1; + auto layout = tensorTy.getEncoding(); + auto shape = tensorTy.getShape(); + + // Here order should be ordered by contiguous first, so the first element + // should have the largest contiguous. + auto order = triton::gpu::getOrder(layout); + unsigned align = getPtrAlignment(ptr); + + unsigned contigPerThread = triton::gpu::getSizePerThread(layout)[order[0]]; + unsigned vec = std::min(align, contigPerThread); + vec = std::min(shape[order[0]], vec); + + return vec; +} + +unsigned AxisInfoAnalysis::getPtrAlignment(Value ptr) { + auto tensorTy = ptr.getType().dyn_cast(); + if (!tensorTy) + return 1; + auto axisInfo = lookupLatticeElement(ptr)->getValue(); + auto layout = tensorTy.getEncoding(); + auto order = triton::gpu::getOrder(layout); + unsigned maxMultiple = axisInfo.getDivisibility(order[0]); + unsigned maxContig = axisInfo.getContiguity(order[0]); + unsigned alignment = std::min(maxMultiple, maxContig); + return alignment; +} + +unsigned AxisInfoAnalysis::getMaskAlignment(Value mask) { + auto tensorTy = mask.getType().dyn_cast(); + if (!tensorTy) + return 1; + auto maskOrder = triton::gpu::getOrder(tensorTy.getEncoding()); + auto maskAxis = lookupLatticeElement(mask)->getValue(); + auto alignment = std::max(maskAxis.getConstancy(maskOrder[0]), 1); + return alignment; +} + +} // namespace mlir diff --git a/lib/Analysis/CMakeLists.txt b/lib/Analysis/CMakeLists.txt new file mode 100644 index 000000000000..34059e9feb58 --- /dev/null +++ b/lib/Analysis/CMakeLists.txt @@ -0,0 +1,10 @@ +add_mlir_library(TritonAnalysis + AxisInfo.cpp + Allocation.cpp + Membar.cpp + Alias.cpp + Utility.cpp + + DEPENDS + TritonGPUAttrDefsIncGen +) \ No newline at end of file diff --git a/lib/Analysis/Membar.cpp b/lib/Analysis/Membar.cpp new file mode 100644 index 000000000000..88dd9165d5b2 --- /dev/null +++ b/lib/Analysis/Membar.cpp @@ -0,0 +1,137 @@ +#include "triton/Analysis/Membar.h" +#include "triton/Analysis/Alias.h" +#include "triton/Dialect/TritonGPU/IR/Dialect.h" + +#include 
"mlir/Dialect/GPU/GPUDialect.h" +#include "mlir/Dialect/Tensor/IR/Tensor.h" + +namespace mlir { + +void MembarAnalysis::run() { + auto *operation = allocation->getOperation(); + RegionInfo regionInfo; + OpBuilder builder(operation); + dfsOperation(operation, ®ionInfo, &builder); +} + +void MembarAnalysis::dfsOperation(Operation *operation, + RegionInfo *parentRegionInfo, + OpBuilder *builder) { + transfer(operation, parentRegionInfo, builder); + if (operation->getNumRegions()) { + // If there's any nested regions, we need to visit them. + // scf.if and scf.else: two regions + // scf.if only: two regions + // scf.for: one region + RegionInfo curRegionInfo; + auto traverseRegions = [&]() -> auto{ + for (auto ®ion : operation->getRegions()) { + // Copy the parent info as the current info. + RegionInfo regionInfo = *parentRegionInfo; + for (auto &block : region.getBlocks()) { + assert(region.getBlocks().size() == 1 && + "Multiple blocks in a region is not supported"); + for (auto &op : block.getOperations()) { + // Traverse the nested operation. + dfsOperation(&op, ®ionInfo, builder); + } + } + curRegionInfo.join(regionInfo); + } + // Set the parent region info as the union of the nested region info. + *parentRegionInfo = curRegionInfo; + }; + + traverseRegions(); + if (isa(operation)) { + // scf.for can have two possible inputs: the init value and the + // previous iteration's result. Although we've applied alias analysis, + // there could be unsynced memory accesses on reused memories. + // For example, consider the following code: + // %1 = convert_layout %0: blocked -> shared + // ... + // gpu.barrier + // ... + // %5 = convert_layout %4 : shared -> dot + // %6 = tt.dot %2, %5 + // scf.yield + // + // Though %5 could be released before scf.yield, it may shared the same + // memory with %1. So we actually have to insert a barrier before %1 to + // make sure the memory is synced. + traverseRegions(); + } + } +} + +void MembarAnalysis::transfer(Operation *op, RegionInfo *regionInfo, + OpBuilder *builder) { + if (isa(op) || isa(op) || isa(op) || + isa(op) || isa(op)) { + // Do not insert barriers before control flow operations and + // alloc/extract/insert + // alloc is an allocation op without memory write. 
+ // FIXME(Keren): extract_slice is always alias for now + return; + } + + if (isa(op)) { + // If the current op is a barrier, we sync previous reads and writes + regionInfo->sync(); + return; + } + + if (isa(op) && + !isa(op->getNextNode())) { + // If the current op is an async wait and the next op is not a barrier we + // insert a barrier op and sync + regionInfo->sync(); + OpBuilder::InsertionGuard g(*builder); + builder->setInsertionPointAfter(op); + builder->create(op->getLoc()); + regionInfo->sync(); + return; + } + + RegionInfo curRegionInfo; + for (Value value : op->getOperands()) { + for (auto bufferId : allocation->getBufferIds(value)) { + if (bufferId != Allocation::InvalidBufferId) { + if (isa(op) || + isa(op)) { + // FIXME(Keren): insert_slice and insert_slice_async are always alias + // for now + curRegionInfo.syncWriteBuffers.insert(bufferId); + } else { + // ConvertLayoutOp: shared memory -> registers + curRegionInfo.syncReadBuffers.insert(bufferId); + } + } + } + } + for (Value value : op->getResults()) { + // ConvertLayoutOp: registers -> shared memory + auto bufferId = allocation->getBufferId(value); + if (bufferId != Allocation::InvalidBufferId) { + curRegionInfo.syncWriteBuffers.insert(bufferId); + } + } + // Scratch buffer is considered as both shared memory write & read + auto bufferId = allocation->getBufferId(op); + if (bufferId != Allocation::InvalidBufferId) { + curRegionInfo.syncWriteBuffers.insert(bufferId); + curRegionInfo.syncReadBuffers.insert(bufferId); + } + + if (regionInfo->isIntersected(curRegionInfo, allocation)) { + OpBuilder::InsertionGuard g(*builder); + builder->setInsertionPoint(op); + builder->create(op->getLoc()); + regionInfo->sync(); + } + // Update the region info, even if barrier is inserted, we have to maintain + // the current op's read/write buffers. 
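A concrete, hedged walk-through of the barrier decision made just above; the IR snippet is illustrative.

// Example:
//   %1 = convert_layout %0 : blocked -> shared      // result writes buffer B
//   ...
//   %5 = convert_layout %1 : shared -> dot_operand  // operand reads buffer B
// When transfer() reaches %5, curRegionInfo's read set contains B while the
// pending write set accumulated in regionInfo also contains B, so
// isIntersected() reports a conflict, a gpu.barrier is created right before
// %5, and regionInfo->sync() marks the previously pending accesses as
// synchronized.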
+ regionInfo->join(curRegionInfo); +} + +} // namespace mlir diff --git a/lib/Analysis/Utility.cpp b/lib/Analysis/Utility.cpp new file mode 100644 index 000000000000..f442c3da290e --- /dev/null +++ b/lib/Analysis/Utility.cpp @@ -0,0 +1,151 @@ +#include "triton/Analysis/Utility.h" +#include "mlir/IR/Dialect.h" +#include "triton/Dialect/Triton/IR/Dialect.h" +#include "triton/Dialect/TritonGPU/IR/Dialect.h" + +namespace mlir { + +bool ReduceOpHelper::isFastReduction() { + auto srcLayout = srcTy.getEncoding(); + auto axis = op.axis(); + return axis == triton::gpu::getOrder(srcLayout)[0]; +} + +unsigned ReduceOpHelper::getInterWarpSize() { + auto srcLayout = srcTy.getEncoding(); + auto srcShape = srcTy.getShape(); + auto axis = op.axis(); + auto srcReduceDimSize = static_cast(srcShape[axis]); + unsigned sizeIntraWarps = getIntraWarpSize(); + return std::min(srcReduceDimSize / sizeIntraWarps, + triton::gpu::getWarpsPerCTA(srcLayout)[axis]); +} + +unsigned ReduceOpHelper::getIntraWarpSize() { + auto srcLayout = srcTy.getEncoding(); + auto srcShape = srcTy.getShape(); + auto axis = op.axis(); + auto srcReduceDimSize = static_cast(srcShape[axis]); + return std::min(srcReduceDimSize, + triton::gpu::getThreadsPerWarp(srcLayout)[axis]); +} + +unsigned ReduceOpHelper::getThreadsReductionAxis() { + auto srcLayout = srcTy.getEncoding(); + auto axis = op.axis(); + return triton::gpu::getThreadsPerWarp(srcLayout)[axis] * + triton::gpu::getWarpsPerCTA(srcLayout)[axis]; +} + +SmallVector ReduceOpHelper::getScratchConfigBasic() { + auto axis = op.axis(); + auto smemShape = convertType(getSrcShape()); + smemShape[axis] = std::min(smemShape[axis], getThreadsReductionAxis()); + return smemShape; +} + +SmallVector> ReduceOpHelper::getScratchConfigsFast() { + auto axis = op.axis(); + SmallVector> smemShapes(3); + + /// shared memory block0 + smemShapes[0] = convertType(getSrcShape()); + smemShapes[0][axis] = getInterWarpSize(); + + /// FIXME(Qingyi): This size is actually larger than required. + /// shared memory block1: + auto mod = op.getOperation()->getParentOfType(); + unsigned numWarps = triton::gpu::TritonGPUDialect::getNumWarps(mod); + smemShapes[1].push_back(numWarps * 32); + + return smemShapes; +} + +unsigned ReduceOpHelper::getScratchSizeInBytes() { + unsigned elems = 0; + if (isFastReduction()) { + auto smemShapes = getScratchConfigsFast(); + for (const auto &smemShape : smemShapes) + elems = std::max(elems, product(smemShape)); + } else { + auto smemShape = getScratchConfigBasic(); + elems = product(smemShape); + } + + auto tensorType = op.operand().getType().cast(); + unsigned bytes = elems * tensorType.getElementTypeBitWidth() / 8; + + if (triton::ReduceOp::withIndex(op.redOp())) + bytes += elems * sizeof(int32_t); + + return bytes; +} + +bool isSharedEncoding(Value value) { + auto type = value.getType(); + if (auto tensorType = type.dyn_cast()) { + auto encoding = tensorType.getEncoding(); + return encoding && encoding.isa(); + } + return false; +} + +bool maybeSharedAllocationOp(Operation *op) { + // TODO(Keren): This function can be replaced by adding + // MemoryEffectOpInterface. We can then use the MemoryEffectOpInterface to + // query the memory effects of the op. 
+ auto *dialect = op->getDialect(); + return dialect && + (dialect->getTypeID() == + mlir::TypeID::get() || + dialect->getTypeID() == mlir::TypeID::get() || + dialect->getTypeID() == + mlir::TypeID::get() || + dialect->getTypeID() == mlir::TypeID::get()); +} + +bool maybeAliasOp(Operation *op) { + return isa(op) || isa(op) || + isa(op) || + isa(op); +} + +bool supportMMA(triton::DotOp op, int version) { + // Refer to mma section for the data type supported by Volta and Hopper + // Tensor Core in + // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-fragment-mma-884-f16 + auto aElemTy = op.a().getType().cast().getElementType(); + auto bElemTy = op.b().getType().cast().getElementType(); + if (aElemTy.isF32() && bElemTy.isF32()) { + return op.allowTF32() && version >= 2; + } + return supportMMA(op.a(), version) && supportMMA(op.b(), version); +} + +bool supportMMA(Value value, int version) { + // Tell whether a DotOp support HMMA by the operand type(either $a or $b). + // We cannot get both the operand types(in TypeConverter), here we assume the + // types of both the operands are identical here. + assert((version == 1 || version == 2) && + "Unexpected MMA layout version found"); + auto elemTy = value.getType().cast().getElementType(); + return elemTy.isF16() || elemTy.isBF16() || + (elemTy.isF32() && version >= 2) || + (elemTy.isInteger(8) && version >= 2); +} + +Type getElementType(Value value) { + auto type = value.getType(); + if (auto tensorType = type.dyn_cast()) + return tensorType.getElementType(); + return type; +} + +std::string getValueOperandName(Value value, AsmState &state) { + std::string opName; + llvm::raw_string_ostream ss(opName); + value.printAsOperand(ss, state); + return opName; +} + +} // namespace mlir diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt new file mode 100644 index 000000000000..ab1d31a76317 --- /dev/null +++ b/lib/CMakeLists.txt @@ -0,0 +1,5 @@ +# add_subdirectory(codegen) +add_subdirectory(Analysis) +add_subdirectory(Conversion) +add_subdirectory(Dialect) +add_subdirectory(Target) diff --git a/lib/Conversion/CMakeLists.txt b/lib/Conversion/CMakeLists.txt new file mode 100644 index 000000000000..143a4375a811 --- /dev/null +++ b/lib/Conversion/CMakeLists.txt @@ -0,0 +1,2 @@ +add_subdirectory(TritonToTritonGPU) +add_subdirectory(TritonGPUToLLVM) diff --git a/lib/Conversion/TritonGPUToLLVM/CMakeLists.txt b/lib/Conversion/TritonGPUToLLVM/CMakeLists.txt new file mode 100644 index 000000000000..4bd0f251377f --- /dev/null +++ b/lib/Conversion/TritonGPUToLLVM/CMakeLists.txt @@ -0,0 +1,31 @@ +add_mlir_conversion_library(TritonGPUToLLVM + TritonGPUToLLVM.cpp + TritonGPUToLLVMPass.cpp + PTXAsmFormat.cpp + ConvertLayoutOpToLLVM.cpp + ElementwiseOpToLLVM.cpp + ViewOpToLLVM.cpp + LoadStoreOpToLLVM.cpp + DotOpToLLVM.cpp + ReduceOpToLLVM.cpp + + ADDITIONAL_HEADER_DIRS + ${PROJECT_SOURCE_DIR}/include/triton/Conversion/TritonGPUToLLVM + + DEPENDS + TritonConversionPassIncGen + + LINK_COMPONENTS + Core + + LINK_LIBS PUBLIC + MLIRIR + MLIRPass + MLIRGPUOps + MLIRGPUToNVVMTransforms + MLIRGPUTransforms + TritonAnalysis + TritonIR + TritonGPUIR + TritonGPUTransforms +) diff --git a/lib/Conversion/TritonGPUToLLVM/ConvertLayoutOpToLLVM.cpp b/lib/Conversion/TritonGPUToLLVM/ConvertLayoutOpToLLVM.cpp new file mode 100644 index 000000000000..1a85ca5da000 --- /dev/null +++ b/lib/Conversion/TritonGPUToLLVM/ConvertLayoutOpToLLVM.cpp @@ -0,0 +1,686 @@ +#include "ConvertLayoutOpToLLVM.h" +#include "DotOpHelpers.h" + +using 
::mlir::LLVM::DotOpFMAConversionHelper; +using ::mlir::LLVM::DotOpMmaV1ConversionHelper; +using ::mlir::LLVM::getElementsFromStruct; +using ::mlir::LLVM::getSharedMemoryObjectFromStruct; +using ::mlir::LLVM::getStridesFromShapeAndOrder; +using ::mlir::LLVM::getStructFromElements; +using ::mlir::LLVM::MMA16816ConversionHelper; +using ::mlir::triton::gpu::DotOperandEncodingAttr; +using ::mlir::triton::gpu::getElemsPerThread; +using ::mlir::triton::gpu::getOrder; +using ::mlir::triton::gpu::getShapePerCTA; +using ::mlir::triton::gpu::getSizePerThread; +using ::mlir::triton::gpu::SharedEncodingAttr; + +bool isMmaToDotShortcut(MmaEncodingAttr &mmaLayout, + DotOperandEncodingAttr &dotOperandLayout) { + // dot_op = #mma + // when #mma = MmaEncoding + return mmaLayout.getWarpsPerCTA()[1] == 1 && + dotOperandLayout.getOpIdx() == 0 && + dotOperandLayout.getParent() == mmaLayout; +} + +void storeBlockedToShared(Value src, Value llSrc, ArrayRef srcStrides, + ArrayRef srcIndices, Value dst, Value smemBase, + Type elemTy, Location loc, + ConversionPatternRewriter &rewriter) { + auto srcTy = src.getType().cast(); + auto srcShape = srcTy.getShape(); + assert(srcShape.size() == 2 && "Unexpected rank of insertSlice"); + + auto dstTy = dst.getType().cast(); + auto srcBlockedLayout = srcTy.getEncoding().cast(); + auto dstSharedLayout = dstTy.getEncoding().cast(); + auto inOrd = srcBlockedLayout.getOrder(); + auto outOrd = dstSharedLayout.getOrder(); + if (inOrd != outOrd) + llvm_unreachable( + "blocked -> shared with different order not yet implemented"); + unsigned inVec = + inOrd == outOrd ? srcBlockedLayout.getSizePerThread()[inOrd[0]] : 1; + unsigned outVec = dstSharedLayout.getVec(); + unsigned minVec = std::min(outVec, inVec); + unsigned perPhase = dstSharedLayout.getPerPhase(); + unsigned maxPhase = dstSharedLayout.getMaxPhase(); + unsigned numElems = getElemsPerThread(srcTy); + auto inVals = getElementsFromStruct(loc, llSrc, rewriter); + auto srcAccumSizeInThreads = + product(srcBlockedLayout.getSizePerThread()); + auto wordTy = vec_ty(elemTy, minVec); + auto elemPtrTy = ptr_ty(elemTy); + + // TODO: [goostavz] We should make a cache for the calculation of + // emitBaseIndexForBlockedLayout in case backend compiler not being able to + // optimize that + SmallVector srcShapePerCTA = getShapePerCTA(srcBlockedLayout); + SmallVector reps{ceil(srcShape[0], srcShapePerCTA[0]), + ceil(srcShape[1], srcShapePerCTA[1])}; + + // Visit each input value in the order they are placed in inVals + // + // Please note that the order was not awaring of blockLayout.getOrder(), + // thus the adjacent elems may not belong to a same word. This could be + // improved if we update the elements order by emitIndicesForBlockedLayout() + SmallVector wordsInEachRep(2); + wordsInEachRep[0] = inOrd[0] == 0 + ? srcBlockedLayout.getSizePerThread()[0] / minVec + : srcBlockedLayout.getSizePerThread()[0]; + wordsInEachRep[1] = inOrd[0] == 0 + ? 
srcBlockedLayout.getSizePerThread()[1] + : srcBlockedLayout.getSizePerThread()[1] / minVec; + Value outVecVal = i32_val(outVec); + Value minVecVal = i32_val(minVec); + auto numWordsEachRep = product(wordsInEachRep); + SmallVector wordVecs(numWordsEachRep); + for (unsigned i = 0; i < numElems; ++i) { + if (i % srcAccumSizeInThreads == 0) { + // start of a replication + for (unsigned w = 0; w < numWordsEachRep; ++w) { + wordVecs[w] = undef(wordTy); + } + } + unsigned linearIdxInNanoTile = i % srcAccumSizeInThreads; + auto multiDimIdxInNanoTile = getMultiDimIndex( + linearIdxInNanoTile, srcBlockedLayout.getSizePerThread(), inOrd); + unsigned pos = multiDimIdxInNanoTile[inOrd[0]] % minVec; + multiDimIdxInNanoTile[inOrd[0]] /= minVec; + auto wordVecIdx = + getLinearIndex(multiDimIdxInNanoTile, wordsInEachRep, inOrd); + wordVecs[wordVecIdx] = + insert_element(wordTy, wordVecs[wordVecIdx], inVals[i], i32_val(pos)); + + if (i % srcAccumSizeInThreads == srcAccumSizeInThreads - 1) { + // end of replication, store the vectors into shared memory + unsigned linearRepIdx = i / srcAccumSizeInThreads; + auto multiDimRepIdx = + getMultiDimIndex(linearRepIdx, reps, inOrd); + for (unsigned linearWordIdx = 0; linearWordIdx < numWordsEachRep; + ++linearWordIdx) { + // step 1: recover the multidim_index from the index of + // input_elements + auto multiDimWordIdx = + getMultiDimIndex(linearWordIdx, wordsInEachRep, inOrd); + SmallVector multiDimIdx(2); + auto wordOffset0 = multiDimRepIdx[0] * srcShapePerCTA[0] + + multiDimWordIdx[0] * (inOrd[0] == 0 ? minVec : 1); + auto wordOffset1 = multiDimRepIdx[1] * srcShapePerCTA[1] + + multiDimWordIdx[1] * (inOrd[0] == 1 ? minVec : 1); + multiDimIdx[0] = add(srcIndices[0], i32_val(wordOffset0)); + multiDimIdx[1] = add(srcIndices[1], i32_val(wordOffset1)); + + // step 2: do swizzling + Value remained = urem(multiDimIdx[outOrd[0]], outVecVal); + multiDimIdx[outOrd[0]] = udiv(multiDimIdx[outOrd[0]], outVecVal); + Value off_1 = mul(multiDimIdx[outOrd[1]], srcStrides[outOrd[1]]); + Value phaseId = udiv(multiDimIdx[outOrd[1]], i32_val(perPhase)); + phaseId = urem(phaseId, i32_val(maxPhase)); + Value off_0 = xor_(multiDimIdx[outOrd[0]], phaseId); + off_0 = mul(off_0, outVecVal); + remained = udiv(remained, minVecVal); + off_0 = add(off_0, mul(remained, minVecVal)); + Value offset = add(off_1, off_0); + + // step 3: store + Value smemAddr = gep(elemPtrTy, smemBase, offset); + smemAddr = bitcast(smemAddr, ptr_ty(wordTy, 3)); + store(wordVecs[linearWordIdx], smemAddr); + } + } + } +} + +struct ConvertLayoutOpConversion + : public ConvertTritonGPUOpToLLVMPattern { +public: + using ConvertTritonGPUOpToLLVMPattern< + triton::gpu::ConvertLayoutOp>::ConvertTritonGPUOpToLLVMPattern; + + LogicalResult + matchAndRewrite(triton::gpu::ConvertLayoutOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + Value src = op.src(); + Value dst = op.result(); + auto srcTy = src.getType().cast(); + auto dstTy = dst.getType().cast(); + Attribute srcLayout = srcTy.getEncoding(); + Attribute dstLayout = dstTy.getEncoding(); + if (srcLayout.isa() && + dstLayout.isa()) { + return lowerBlockedToShared(op, adaptor, rewriter); + } + if (srcLayout.isa() && + dstLayout.isa()) { + return lowerSharedToDotOperand(op, adaptor, rewriter); + } + if ((srcLayout.isa() || + srcLayout.isa() || + srcLayout.isa()) && + (dstLayout.isa() || + dstLayout.isa() || + dstLayout.isa())) { + return lowerDistributedToDistributed(op, adaptor, rewriter); + } + if (srcLayout.isa() && + dstLayout.isa()) { + 
return lowerMmaToDotOperand(op, adaptor, rewriter); + } + // TODO: to be implemented + llvm_unreachable("unsupported layout conversion"); + return failure(); + } + +private: + SmallVector getMultiDimOffset(Attribute layout, Location loc, + ConversionPatternRewriter &rewriter, + unsigned elemId, ArrayRef shape, + ArrayRef multiDimCTAInRepId, + ArrayRef shapePerCTA) const { + unsigned rank = shape.size(); + if (auto blockedLayout = layout.dyn_cast()) { + auto multiDimOffsetFirstElem = + emitBaseIndexForBlockedLayout(loc, rewriter, blockedLayout, shape); + SmallVector multiDimOffset(rank); + SmallVector multiDimElemId = getMultiDimIndex( + elemId, getSizePerThread(layout), getOrder(layout)); + for (unsigned d = 0; d < rank; ++d) { + multiDimOffset[d] = add(multiDimOffsetFirstElem[d], + idx_val(multiDimCTAInRepId[d] * shapePerCTA[d] + + multiDimElemId[d])); + } + return multiDimOffset; + } + if (auto sliceLayout = layout.dyn_cast()) { + unsigned dim = sliceLayout.getDim(); + auto multiDimOffsetParent = + getMultiDimOffset(sliceLayout.getParent(), loc, rewriter, elemId, + sliceLayout.paddedShape(shape), + sliceLayout.paddedShape(multiDimCTAInRepId), + sliceLayout.paddedShape(shapePerCTA)); + SmallVector multiDimOffset(rank); + for (unsigned d = 0; d < rank + 1; ++d) { + if (d == dim) + continue; + unsigned slicedD = d < dim ? d : (d - 1); + multiDimOffset[slicedD] = multiDimOffsetParent[d]; + } + return multiDimOffset; + } + if (auto mmaLayout = layout.dyn_cast()) { + SmallVector mmaColIdx(4); + SmallVector mmaRowIdx(2); + Value threadId = getThreadId(rewriter, loc); + Value warpSize = idx_val(32); + Value laneId = urem(threadId, warpSize); + Value warpId = udiv(threadId, warpSize); + // TODO: fix the bug in MMAEncodingAttr document + SmallVector multiDimWarpId(2); + multiDimWarpId[0] = urem(warpId, idx_val(mmaLayout.getWarpsPerCTA()[0])); + multiDimWarpId[1] = udiv(warpId, idx_val(mmaLayout.getWarpsPerCTA()[0])); + Value _1 = idx_val(1); + Value _2 = idx_val(2); + Value _4 = idx_val(4); + Value _8 = idx_val(8); + Value _16 = idx_val(16); + if (mmaLayout.isAmpere()) { + multiDimWarpId[0] = urem(multiDimWarpId[0], idx_val(shape[0] / 16)); + multiDimWarpId[1] = urem(multiDimWarpId[1], idx_val(shape[1] / 8)); + Value mmaGrpId = udiv(laneId, _4); + Value mmaGrpIdP8 = add(mmaGrpId, _8); + Value mmaThreadIdInGrp = urem(laneId, _4); + Value mmaThreadIdInGrpM2 = mul(mmaThreadIdInGrp, _2); + Value mmaThreadIdInGrpM2P1 = add(mmaThreadIdInGrpM2, _1); + Value rowWarpOffset = mul(multiDimWarpId[0], _16); + mmaRowIdx[0] = add(mmaGrpId, rowWarpOffset); + mmaRowIdx[1] = add(mmaGrpIdP8, rowWarpOffset); + Value colWarpOffset = mul(multiDimWarpId[1], _8); + mmaColIdx[0] = add(mmaThreadIdInGrpM2, colWarpOffset); + mmaColIdx[1] = add(mmaThreadIdInGrpM2P1, colWarpOffset); + } else if (mmaLayout.isVolta()) { + multiDimWarpId[0] = urem(multiDimWarpId[0], idx_val(shape[0] / 16)); + multiDimWarpId[1] = urem(multiDimWarpId[1], idx_val(shape[1] / 16)); + Value laneIdDiv16 = udiv(laneId, _16); + Value laneIdRem16 = urem(laneId, _16); + Value laneIdRem2 = urem(laneId, _2); + Value laneIdRem16Div8 = udiv(laneIdRem16, _8); + Value laneIdRem16Div4 = udiv(laneIdRem16, _4); + Value laneIdRem16Div4Rem2 = urem(laneIdRem16Div4, _2); + Value laneIdRem4Div2 = udiv(urem(laneId, _4), _2); + Value rowWarpOffset = mul(multiDimWarpId[0], _16); + Value colWarpOffset = mul(multiDimWarpId[1], _16); + mmaRowIdx[0] = + add(add(mul(laneIdDiv16, _8), mul(laneIdRem16Div4Rem2, _4)), + laneIdRem2); + mmaRowIdx[0] = add(mmaRowIdx[0], rowWarpOffset); 
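// Editorial aside (not part of the patch): the Volta lane -> row mapping
// computed just above, restated as a plain constexpr function so one value
// can be checked by hand. The warp offset is taken as 0 here; that is an
// assumption for the example only.
constexpr int voltaRowIdx0(int lane) {
  return (lane / 16) * 8 + (((lane % 16) / 4) % 2) * 4 + (lane % 2);
}
static_assert(voltaRowIdx0(0) == 0, "lane 0 starts at row 0");
static_assert(voltaRowIdx0(13) == 5, "lane 13 -> row 5 (second row is 5 + 2)");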
+ mmaRowIdx[1] = add(mmaRowIdx[0], _2); + mmaColIdx[0] = add(mul(laneIdRem16Div8, _4), mul(laneIdRem4Div2, _2)); + mmaColIdx[0] = add(mmaColIdx[0], colWarpOffset); + mmaColIdx[1] = add(mmaColIdx[0], _1); + mmaColIdx[2] = add(mmaColIdx[0], _8); + mmaColIdx[3] = add(mmaColIdx[0], idx_val(9)); + } else { + llvm_unreachable("Unexpected MMALayout version"); + } + + assert(rank == 2); + SmallVector multiDimOffset(rank); + if (mmaLayout.isAmpere()) { + multiDimOffset[0] = elemId < 2 ? mmaRowIdx[0] : mmaRowIdx[1]; + multiDimOffset[1] = elemId % 2 == 0 ? mmaColIdx[0] : mmaColIdx[1]; + multiDimOffset[0] = add( + multiDimOffset[0], idx_val(multiDimCTAInRepId[0] * shapePerCTA[0])); + multiDimOffset[1] = add( + multiDimOffset[1], idx_val(multiDimCTAInRepId[1] * shapePerCTA[1])); + } else if (mmaLayout.isVolta()) { + // the order of elements in a thread: + // c0, c1, ... c4, c5 + // c2, c3, ... c6, c7 + if (elemId < 2) { + multiDimOffset[0] = mmaRowIdx[0]; + multiDimOffset[1] = mmaColIdx[elemId % 2]; + } else if (elemId >= 2 && elemId < 4) { + multiDimOffset[0] = mmaRowIdx[1]; + multiDimOffset[1] = mmaColIdx[elemId % 2]; + } else if (elemId >= 4 && elemId < 6) { + multiDimOffset[0] = mmaRowIdx[0]; + multiDimOffset[1] = mmaColIdx[elemId % 2 + 2]; + } else if (elemId >= 6) { + multiDimOffset[0] = mmaRowIdx[1]; + multiDimOffset[1] = mmaColIdx[elemId % 2 + 2]; + } + multiDimOffset[0] = add( + multiDimOffset[0], idx_val(multiDimCTAInRepId[0] * shapePerCTA[0])); + multiDimOffset[1] = add( + multiDimOffset[1], idx_val(multiDimCTAInRepId[1] * shapePerCTA[1])); + } else { + llvm_unreachable("Unexpected MMALayout version"); + } + return multiDimOffset; + } + llvm_unreachable("unexpected layout in getMultiDimOffset"); + } + + // shared memory rd/st for blocked or mma layout with data padding + void processReplica(Location loc, ConversionPatternRewriter &rewriter, + bool stNotRd, RankedTensorType type, + ArrayRef numCTAsEachRep, + ArrayRef multiDimRepId, unsigned vec, + ArrayRef paddedRepShape, + ArrayRef outOrd, SmallVector &vals, + Value smemBase) const { + auto accumNumCTAsEachRep = product(numCTAsEachRep); + auto layout = type.getEncoding(); + auto blockedLayout = layout.dyn_cast(); + auto sliceLayout = layout.dyn_cast(); + auto mmaLayout = layout.dyn_cast(); + auto rank = type.getRank(); + auto sizePerThread = getSizePerThread(layout); + auto accumSizePerThread = product(sizePerThread); + SmallVector numCTAs(rank); + auto shapePerCTA = getShapePerCTA(layout); + auto order = getOrder(layout); + for (unsigned d = 0; d < rank; ++d) { + numCTAs[d] = ceil(type.getShape()[d], shapePerCTA[d]); + } + auto elemTy = type.getElementType(); + bool isInt1 = elemTy.isInteger(1); + bool isPtr = elemTy.isa(); + auto llvmElemTyOrig = getTypeConverter()->convertType(elemTy); + if (isInt1) + elemTy = IntegerType::get(elemTy.getContext(), 8); + else if (isPtr) + elemTy = IntegerType::get(elemTy.getContext(), 64); + + auto llvmElemTy = getTypeConverter()->convertType(elemTy); + + for (unsigned ctaId = 0; ctaId < accumNumCTAsEachRep; ++ctaId) { + auto multiDimCTAInRepId = + getMultiDimIndex(ctaId, numCTAsEachRep, order); + SmallVector multiDimCTAId(rank); + for (const auto &it : llvm::enumerate(multiDimCTAInRepId)) { + auto d = it.index(); + multiDimCTAId[d] = multiDimRepId[d] * numCTAsEachRep[d] + it.value(); + } + + auto linearCTAId = + getLinearIndex(multiDimCTAId, numCTAs, order); + // TODO: This is actually redundant index calculation, we should + // consider of caching the index calculation result in case + // of performance 
issue observed. + for (unsigned elemId = 0; elemId < accumSizePerThread; elemId += vec) { + SmallVector multiDimOffset = + getMultiDimOffset(layout, loc, rewriter, elemId, type.getShape(), + multiDimCTAInRepId, shapePerCTA); + Value offset = + linearize(rewriter, loc, multiDimOffset, paddedRepShape, outOrd); + + auto elemPtrTy = ptr_ty(llvmElemTy, 3); + Value ptr = gep(elemPtrTy, smemBase, offset); + auto vecTy = vec_ty(llvmElemTy, vec); + ptr = bitcast(ptr, ptr_ty(vecTy, 3)); + if (stNotRd) { + Value valVec = undef(vecTy); + for (unsigned v = 0; v < vec; ++v) { + auto currVal = vals[elemId + linearCTAId * accumSizePerThread + v]; + if (isInt1) + currVal = zext(llvmElemTy, currVal); + else if (isPtr) + currVal = ptrtoint(llvmElemTy, currVal); + valVec = insert_element(vecTy, valVec, currVal, idx_val(v)); + } + store(valVec, ptr); + } else { + Value valVec = load(ptr); + for (unsigned v = 0; v < vec; ++v) { + Value currVal = extract_element(llvmElemTy, valVec, idx_val(v)); + if (isInt1) + currVal = icmp_ne(currVal, + rewriter.create( + loc, i8_ty, rewriter.getI8IntegerAttr(0))); + else if (isPtr) + currVal = inttoptr(llvmElemTyOrig, currVal); + vals[elemId + linearCTAId * accumSizePerThread + v] = currVal; + } + } + } + } + } + + // blocked/mma -> blocked/mma. + // Data padding in shared memory to avoid bank conflict. + LogicalResult + lowerDistributedToDistributed(triton::gpu::ConvertLayoutOp op, + OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const { + auto loc = op.getLoc(); + Value src = op.src(); + Value dst = op.result(); + auto srcTy = src.getType().cast(); + auto dstTy = dst.getType().cast(); + Attribute srcLayout = srcTy.getEncoding(); + Attribute dstLayout = dstTy.getEncoding(); + auto llvmElemTy = getTypeConverter()->convertType(dstTy.getElementType()); + Value smemBase = getSharedMemoryBase(loc, rewriter, op.getOperation()); + auto elemPtrTy = ptr_ty(llvmElemTy, 3); + smemBase = bitcast(smemBase, elemPtrTy); + auto shape = dstTy.getShape(); + unsigned rank = dstTy.getRank(); + SmallVector numReplicates(rank); + SmallVector inNumCTAsEachRep(rank); + SmallVector outNumCTAsEachRep(rank); + SmallVector inNumCTAs(rank); + SmallVector outNumCTAs(rank); + auto srcShapePerCTA = getShapePerCTA(srcLayout); + auto dstShapePerCTA = getShapePerCTA(dstLayout); + for (unsigned d = 0; d < rank; ++d) { + unsigned inPerCTA = std::min(shape[d], srcShapePerCTA[d]); + unsigned outPerCTA = std::min(shape[d], dstShapePerCTA[d]); + unsigned maxPerCTA = std::max(inPerCTA, outPerCTA); + numReplicates[d] = ceil(shape[d], maxPerCTA); + inNumCTAsEachRep[d] = maxPerCTA / inPerCTA; + outNumCTAsEachRep[d] = maxPerCTA / outPerCTA; + assert(maxPerCTA % inPerCTA == 0 && maxPerCTA % outPerCTA == 0); + inNumCTAs[d] = ceil(shape[d], inPerCTA); + outNumCTAs[d] = ceil(shape[d], outPerCTA); + } + // Potentially we need to store for multiple CTAs in this replication + auto accumNumReplicates = product(numReplicates); + // unsigned elems = getElemsPerThread(srcTy); + auto vals = getElementsFromStruct(loc, adaptor.src(), rewriter); + unsigned inVec = 0; + unsigned outVec = 0; + auto paddedRepShape = getScratchConfigForCvtLayout(op, inVec, outVec); + + unsigned outElems = getElemsPerThread(dstTy); + auto outOrd = getOrder(dstLayout); + SmallVector outVals(outElems); + + for (unsigned repId = 0; repId < accumNumReplicates; ++repId) { + auto multiDimRepId = + getMultiDimIndex(repId, numReplicates, outOrd); + if (repId != 0) + barrier(); + if (srcLayout.isa() || + srcLayout.isa() || + srcLayout.isa()) { + 
processReplica(loc, rewriter, /*stNotRd*/ true, srcTy, inNumCTAsEachRep, + multiDimRepId, inVec, paddedRepShape, outOrd, vals, + smemBase); + } else { + assert(0 && "ConvertLayout with input layout not implemented"); + return failure(); + } + barrier(); + if (dstLayout.isa() || + dstLayout.isa() || + dstLayout.isa()) { + processReplica(loc, rewriter, /*stNotRd*/ false, dstTy, + outNumCTAsEachRep, multiDimRepId, outVec, paddedRepShape, + outOrd, outVals, smemBase); + } else { + assert(0 && "ConvertLayout with output layout not implemented"); + return failure(); + } + } + + SmallVector types(outElems, llvmElemTy); + auto *ctx = llvmElemTy.getContext(); + Type structTy = struct_ty(types); + Value result = getStructFromElements(loc, outVals, rewriter, structTy); + rewriter.replaceOp(op, result); + + return success(); + } + + // blocked -> shared. + // Swizzling in shared memory to avoid bank conflict. Normally used for + // A/B operands of dots. + LogicalResult + lowerBlockedToShared(triton::gpu::ConvertLayoutOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const { + auto loc = op.getLoc(); + Value src = op.src(); + Value dst = op.result(); + auto srcTy = src.getType().cast(); + auto srcShape = srcTy.getShape(); + auto dstTy = dst.getType().cast(); + auto dstShape = dstTy.getShape(); + assert(srcShape.size() == 2 && + "Unexpected rank of ConvertLayout(blocked->shared)"); + auto srcBlockedLayout = srcTy.getEncoding().cast(); + auto dstSharedLayout = dstTy.getEncoding().cast(); + auto inOrd = srcBlockedLayout.getOrder(); + auto outOrd = dstSharedLayout.getOrder(); + Value smemBase = getSharedMemoryBase(loc, rewriter, dst); + auto elemTy = getTypeConverter()->convertType(srcTy.getElementType()); + auto elemPtrTy = ptr_ty(getTypeConverter()->convertType(elemTy), 3); + smemBase = bitcast(smemBase, elemPtrTy); + + auto srcStrides = + getStridesFromShapeAndOrder(srcShape, inOrd, loc, rewriter); + auto srcIndices = emitBaseIndexForBlockedLayout(loc, rewriter, + srcBlockedLayout, srcShape); + storeBlockedToShared(src, adaptor.src(), srcStrides, srcIndices, dst, + smemBase, elemTy, loc, rewriter); + + auto smemObj = + SharedMemoryObject(smemBase, dstShape, outOrd, loc, rewriter); + auto retVal = getStructFromSharedMemoryObject(loc, smemObj, rewriter); + rewriter.replaceOp(op, retVal); + return success(); + } + + // shared -> mma_operand + LogicalResult + lowerSharedToDotOperand(triton::gpu::ConvertLayoutOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const { + auto loc = op.getLoc(); + Value src = op.src(); + Value dst = op.result(); + auto dstTensorTy = dst.getType().cast(); + auto srcTensorTy = src.getType().cast(); + auto dotOperandLayout = + dstTensorTy.getEncoding().cast(); + auto sharedLayout = srcTensorTy.getEncoding().cast(); + + bool isOuter{}; + int K{}; + if (dotOperandLayout.getOpIdx() == 0) // $a + K = dstTensorTy.getShape()[sharedLayout.getOrder()[0]]; + else // $b + K = dstTensorTy.getShape()[sharedLayout.getOrder()[1]]; + isOuter = K == 1; + + Value res; + if (auto mmaLayout = + dotOperandLayout.getParent().dyn_cast_or_null()) { + res = lowerSharedToDotOperandMMA(op, adaptor, rewriter, mmaLayout, + dotOperandLayout, isOuter); + } else if (auto blockedLayout = + dotOperandLayout.getParent() + .dyn_cast_or_null()) { + auto dotOpLayout = + dstTensorTy.getEncoding().cast(); + DotOpFMAConversionHelper helper(blockedLayout); + auto thread = getThreadId(rewriter, loc); + if (dotOpLayout.getOpIdx() == 0) { // $a + res = helper.loadA(src, adaptor.src(), 
blockedLayout, thread, loc, + rewriter); + } else { // $b + res = helper.loadB(src, adaptor.src(), blockedLayout, thread, loc, + rewriter); + } + } else { + assert(false && "Unsupported dot operand layout found"); + } + + rewriter.replaceOp(op, res); + return success(); + } + + // mma -> dot_operand + LogicalResult + lowerMmaToDotOperand(triton::gpu::ConvertLayoutOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const { + auto loc = op.getLoc(); + auto srcTy = op.src().getType().cast(); + auto dstTy = op.result().getType().cast(); + auto srcLayout = srcTy.getEncoding(); + auto dstLayout = dstTy.getEncoding(); + auto srcMmaLayout = srcLayout.cast(); + auto dstDotLayout = dstLayout.cast(); + if (isMmaToDotShortcut(srcMmaLayout, dstDotLayout)) { + // get source values + auto vals = getElementsFromStruct(loc, adaptor.src(), rewriter); + unsigned elems = getElemsPerThread(srcTy); + Type elemTy = + this->getTypeConverter()->convertType(srcTy.getElementType()); + // for the destination type, we need to pack values together + // so they can be consumed by tensor core operations + unsigned vecSize = + std::max(32 / elemTy.getIntOrFloatBitWidth(), 1); + Type vecTy = vec_ty(elemTy, vecSize); + SmallVector types(elems / vecSize, vecTy); + SmallVector vecVals; + for (unsigned i = 0; i < elems; i += vecSize) { + Value packed = rewriter.create(loc, vecTy); + for (unsigned j = 0; j < vecSize; j++) + packed = insert_element(vecTy, packed, vals[i + j], i32_val(j)); + vecVals.push_back(packed); + } + + // This needs to be ordered the same way that + // ldmatrix.x4 would order it + // TODO: this needs to be refactor so we don't + // implicitly depends on how emitOffsetsForMMAV2 + // is implemented + SmallVector reorderedVals; + for (unsigned i = 0; i < vecVals.size(); i += 4) { + reorderedVals.push_back(vecVals[i]); + reorderedVals.push_back(vecVals[i + 2]); + reorderedVals.push_back(vecVals[i + 1]); + reorderedVals.push_back(vecVals[i + 3]); + } + + // return composeValuesToDotOperandLayoutStruct(ha, numRepM, numRepK); + + Type structTy = + LLVM::LLVMStructType::getLiteral(this->getContext(), types); + Value view = + getStructFromElements(loc, reorderedVals, rewriter, structTy); + rewriter.replaceOp(op, view); + return success(); + } + return failure(); + } + + // shared -> dot_operand if the result layout is mma + Value lowerSharedToDotOperandMMA( + triton::gpu::ConvertLayoutOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter, const MmaEncodingAttr &mmaLayout, + const DotOperandEncodingAttr &dotOperandLayout, bool isOuter) const { + auto loc = op.getLoc(); + Value src = op.src(); + Value dst = op.result(); + bool isHMMA = supportMMA(dst, mmaLayout.getVersionMajor()); + + auto smemObj = + getSharedMemoryObjectFromStruct(loc, adaptor.src(), rewriter); + Value res; + + if (!isOuter && mmaLayout.isAmpere() && isHMMA) { // tensor core v2 + MMA16816ConversionHelper mmaHelper(src.getType(), mmaLayout, + getThreadId(rewriter, loc), rewriter, + getTypeConverter(), op.getLoc()); + + if (dotOperandLayout.getOpIdx() == 0) { + // operand $a + res = mmaHelper.loadA(src, smemObj); + } else if (dotOperandLayout.getOpIdx() == 1) { + // operand $b + res = mmaHelper.loadB(src, smemObj); + } + } else if (!isOuter && mmaLayout.isVolta() && isHMMA) { // tensor core v1 + DotOpMmaV1ConversionHelper helper(mmaLayout); + bool isMMAv1Row = + dotOperandLayout.getIsMMAv1Row().cast().getValue(); + auto srcSharedLayout = src.getType() + .cast() + .getEncoding() + .cast(); + + // Can only convert [1, 0] to row 
or [0, 1] to col for now + if ((srcSharedLayout.getOrder()[0] == 1 && !isMMAv1Row) || + (srcSharedLayout.getOrder()[0] == 0 && isMMAv1Row)) { + llvm::errs() << "Unsupported Shared -> DotOperand[MMAv1] conversion\n"; + return Value(); + } + + if (dotOperandLayout.getOpIdx() == 0) { // operand $a + // TODO[Superjomn]: transA is not available here. + bool transA = false; + res = helper.loadA(src, transA, smemObj, getThreadId(rewriter, loc), + loc, rewriter); + } else if (dotOperandLayout.getOpIdx() == 1) { // operand $b + // TODO[Superjomn]: transB is not available here. + bool transB = false; + res = helper.loadB(src, transB, smemObj, getThreadId(rewriter, loc), + loc, rewriter); + } + } else { + assert(false && "Unsupported mma layout found"); + } + return res; + } +}; + +void populateConvertLayoutOpToLLVMPatterns( + mlir::LLVMTypeConverter &typeConverter, RewritePatternSet &patterns, + int numWarps, AxisInfoAnalysis &axisInfoAnalysis, + const Allocation *allocation, Value smem, PatternBenefit benefit) { + patterns.add(typeConverter, allocation, smem, + benefit); +} diff --git a/lib/Conversion/TritonGPUToLLVM/ConvertLayoutOpToLLVM.h b/lib/Conversion/TritonGPUToLLVM/ConvertLayoutOpToLLVM.h new file mode 100644 index 000000000000..ebf943b6f7db --- /dev/null +++ b/lib/Conversion/TritonGPUToLLVM/ConvertLayoutOpToLLVM.h @@ -0,0 +1,24 @@ +#ifndef TRITON_CONVERSION_TRITONGPU_TO_LLVM_CONVERT_LAYOUT_OP_H +#define TRITON_CONVERSION_TRITONGPU_TO_LLVM_CONVERT_LAYOUT_OP_H + +#include "TritonGPUToLLVMBase.h" + +using namespace mlir; +using namespace mlir::triton; + +using ::mlir::triton::gpu::DotOperandEncodingAttr; + +bool isMmaToDotShortcut(MmaEncodingAttr &mmaLayout, + DotOperandEncodingAttr &dotOperandLayout); + +void storeBlockedToShared(Value src, Value llSrc, ArrayRef srcStrides, + ArrayRef srcIndices, Value dst, Value smemBase, + Type elemPtrTy, Location loc, + ConversionPatternRewriter &rewriter); + +void populateConvertLayoutOpToLLVMPatterns( + mlir::LLVMTypeConverter &typeConverter, RewritePatternSet &patterns, + int numWarps, AxisInfoAnalysis &axisInfoAnalysis, + const Allocation *allocation, Value smem, PatternBenefit benefit); + +#endif diff --git a/lib/Conversion/TritonGPUToLLVM/DotOpHelpers.h b/lib/Conversion/TritonGPUToLLVM/DotOpHelpers.h new file mode 100644 index 000000000000..fa94ba3f335d --- /dev/null +++ b/lib/Conversion/TritonGPUToLLVM/DotOpHelpers.h @@ -0,0 +1,1782 @@ +#ifndef TRITON_CONVERSION_TRITONGPU_TO_LLVM_DOT_OP_HELPERS_H +#define TRITON_CONVERSION_TRITONGPU_TO_LLVM_DOT_OP_HELPERS_H + +#include "mlir/Analysis/SliceAnalysis.h" +#include "mlir/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.h" +#include "mlir/Conversion/GPUToNVVM/GPUToNVVMPass.h" +#include "mlir/Conversion/LLVMCommon/LoweringOptions.h" +#include "mlir/Conversion/LLVMCommon/Pattern.h" +#include "mlir/Conversion/MathToLLVM/MathToLLVM.h" +#include "mlir/Conversion/SCFToStandard/SCFToStandard.h" +#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h" +#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h" +#include "mlir/Dialect/GPU/GPUDialect.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "mlir/Dialect/Tensor/IR/Tensor.h" +#include "mlir/IR/Matchers.h" +#include "mlir/IR/TypeUtilities.h" +#include "mlir/Transforms/DialectConversion.h" +#include "triton/Analysis/AxisInfo.h" +#include "triton/Analysis/Utility.h" +#include "triton/Conversion/MLIRTypes.h" +#include "triton/Conversion/TritonGPUToLLVM/PTXAsmFormat.h" +#include "triton/Dialect/Triton/IR/Dialect.h" +#include 
"triton/Dialect/TritonGPU/IR/Dialect.h" +#include "llvm/Support/Format.h" +#include "llvm/Support/FormatVariadic.h" + +#include "Utility.h" + +namespace mlir { +namespace LLVM { +using namespace mlir::triton; +using ::mlir::triton::gpu::BlockedEncodingAttr; +using ::mlir::triton::gpu::DotOperandEncodingAttr; +using ::mlir::triton::gpu::MmaEncodingAttr; +using ::mlir::triton::gpu::SharedEncodingAttr; + +// Helper for conversion of DotOp with mma, that is sm<80 +struct DotOpMmaV1ConversionHelper { + MmaEncodingAttr mmaLayout; + ArrayRef wpt; + static constexpr std::array fpw{{2, 2, 1}}; + + using ValueTable = std::map, std::pair>; + + explicit DotOpMmaV1ConversionHelper(MmaEncodingAttr mmaLayout) + : mmaLayout(mmaLayout), wpt(mmaLayout.getWarpsPerCTA()) {} + + // Help to share some variables across multiple functions for A. + struct AParam { + SmallVector rep; + SmallVector spw; + + // TODO[Superjomn]: Support the case when isAVec4=false later + // Currently, we only support ld.v2, for the mma layout varies with + // different ld vector width. + // bool isAVec4 = !isARow && shapeTransed[orderTransed[0]] <= 16; + const bool isAVec4{true}; + + explicit AParam(bool isARow) { + int packSize0 = (isARow || isAVec4) ? 1 : 2; + int repM = 2 * packSize0; + int repK = 1; + int spwM = fpw[0] * 4 * repM; + rep.assign({repM, 0, repK}); + spw.assign({spwM, 0, 1}); + } + }; + + // Help to share some variables across multiple functions for A. + struct BParam { + SmallVector rep; + SmallVector spw; + // TODO[Superjomn]: Support the case when isBVec4=false later + // Currently, we only support ld.v2, for the mma layout varies with + // different ld vector width. + // bool isBVec4 = isBRow && shapeTransed[orderTransed[0]] <= 16; + const bool isBVec4{true}; + + explicit BParam(bool isBRow) { + int packSize1 = (isBRow && !isBVec4) ? 2 : 1; + rep.assign({0, 2 * packSize1, 1}); + spw.assign({0, fpw[1] * 4 * rep[1], 1}); + } + }; + + int getRepM(int M) const { + return std::max(M / (wpt[0] * instrShape[0]), 1); + } + int getRepN(int N) const { + return std::max(N / (wpt[1] * instrShape[1]), 1); + } + + static ArrayRef getMmaInstrShape() { return instrShape; } + + static Type getMmaRetType(TensorType operand) { + auto *ctx = operand.getContext(); + Type fp32Ty = type::f32Ty(ctx); + // f16*f16+f32->f32 + return struct_ty(SmallVector{8, fp32Ty}); + } + + // Get the number of fp16x2 elements for $a. + // \param shapeTransed: A's shape or reordered shape if transpose needed. + // \param orderTransed: the order or reordered order if transpose needed. + unsigned getNumM(ArrayRef shapeTransed, bool isARow) const { + AParam param(isARow); + + unsigned numM = param.rep[0] * shapeTransed[0] / (param.spw[0] * wpt[0]); + return numM; + } + + // Get the number of fp16x2 elements for $b. + // \param shapeTransed: B' shape or reordered shape if transpose needed. + // \param orderTransed: the order or reordered order if transpose needed. + unsigned getNumN(ArrayRef shapeTransed, bool isBRow) const { + BParam param(isBRow); + + unsigned numN = param.rep[1] * shapeTransed[1] / (param.spw[1] * wpt[1]); + return numN; + } + + int numElemsPerThreadA(ArrayRef shapeTransed, + ArrayRef orderTransed) const { + int numM = getNumM(shapeTransed, orderTransed[0] == 1); + int NK = shapeTransed[1]; + + // NOTE: We couldn't get the vec from the shared layout. + // int vecA = sharedLayout.getVec(); + // TODO[Superjomn]: Consider the case when vecA > 4 + bool vecGt4 = false; + int elemsPerLd = vecGt4 ? 
4 : 2; + return (numM / 2) * (NK / 4) * elemsPerLd; + } + + int numElemsPerThreadB(ArrayRef shapeTransed, + ArrayRef orderTransed) const { + unsigned numN = getNumN(shapeTransed, orderTransed[0] == 1); + int NK = shapeTransed[0]; + // NOTE: We couldn't get the vec from the shared layout. + // int vecB = sharedLayout.getVec(); + // TODO[Superjomn]: Consider the case when vecA > 4 + bool vecGt4 = false; + int elemsPerLd = vecGt4 ? 4 : 2; + return (numN / 2) * (NK / 4) * elemsPerLd; + } + + // Loading $a from smem to registers, returns a LLVM::Struct. + Value loadA(Value tensor, bool transA, const SharedMemoryObject &smemObj, + Value thread, Location loc, + ConversionPatternRewriter &rewriter) const { + auto *ctx = rewriter.getContext(); + auto tensorTy = tensor.getType().cast(); + auto sharedLayout = tensorTy.getEncoding().cast(); + SmallVector shape(tensorTy.getShape().begin(), + tensorTy.getShape().end()); + SmallVector order(sharedLayout.getOrder().begin(), + sharedLayout.getOrder().end()); + + Value cSwizzleOffset = smemObj.getCSwizzleOffset(order[0]); + Value smemBase = smemObj.getBaseBeforeSwizzle(order[0], loc, rewriter); + + bool isARow = order[0] != 0; + AParam param(isARow); + + auto [offsetAM, offsetAK, _0, _1] = computeOffsets( + thread, isARow, false, fpw, param.spw, param.rep, rewriter, loc); + + if (transA) { + std::swap(shape[0], shape[1]); + std::swap(offsetAM, offsetAK); + std::swap(order[0], order[1]); + } + + int vecA = sharedLayout.getVec(); + + auto strides = smemObj.strides; + Value strideAM = isARow ? strides[0] : i32_val(1); + Value strideAK = isARow ? i32_val(1) : strides[1]; + Value strideA0 = isARow ? strideAK : strideAM; + Value strideA1 = isARow ? strideAM : strideAK; + + int strideRepM = wpt[0] * fpw[0] * 8; + int strideRepK = 1; + + // swizzling + int perPhaseA = sharedLayout.getPerPhase(); + int maxPhaseA = sharedLayout.getMaxPhase(); + int stepA0 = isARow ? strideRepK : strideRepM; + int numPtrA = std::max(2 * perPhaseA * maxPhaseA / stepA0, 1); + int NK = shape[1]; + + // pre-compute pointer lanes + Value offA0 = isARow ? offsetAK : offsetAM; + Value offA1 = isARow ? offsetAM : offsetAK; + Value phaseA = urem(udiv(offA1, i32_val(perPhaseA)), i32_val(maxPhaseA)); + offA0 = add(offA0, cSwizzleOffset); + SmallVector offA(numPtrA); + for (int i = 0; i < numPtrA; i++) { + Value offA0I = add(offA0, i32_val(i * (isARow ? 4 : strideRepM))); + offA0I = udiv(offA0I, i32_val(vecA)); + offA0I = xor_(offA0I, phaseA); + offA0I = mul(offA0I, i32_val(vecA)); + offA[i] = add(mul(offA0I, strideA0), mul(offA1, strideA1)); + } + + Type f16x2Ty = vec_ty(f16_ty, 2); + + // prepare arguments + SmallVector ptrA(numPtrA); + + std::map, std::pair> has; + for (int i = 0; i < numPtrA; i++) + ptrA[i] = gep(ptr_ty(f16_ty), smemBase, offA[i]); + + Type f16PtrTy = ptr_ty(f16_ty); + + auto ld = [&](decltype(has) &vals, int m, int k, Value val0, Value val1) { + vals[{m, k}] = {val0, val1}; + }; + auto loadA = [&](int m, int k) { + int offidx = (isARow ? k / 4 : m) % numPtrA; + Value thePtrA = gep(f16PtrTy, smemBase, offA[offidx]); + + int stepAM = isARow ? m : m / numPtrA * numPtrA; + int stepAK = isARow ? 
k / (numPtrA * vecA) * (numPtrA * vecA) : k; + Value offset = add(mul(i32_val(stepAM * strideRepM), strideAM), + mul(i32_val(stepAK), strideAK)); + Value pa = gep(f16PtrTy, thePtrA, offset); + Type aPtrTy = ptr_ty(vec_ty(i32_ty, std::max(vecA / 2, 1)), 3); + Value ha = load(bitcast(pa, aPtrTy)); + // record lds that needs to be moved + Value ha00 = bitcast(extract_element(ha, i32_val(0)), f16x2Ty); + Value ha01 = bitcast(extract_element(ha, i32_val(1)), f16x2Ty); + ld(has, m, k, ha00, ha01); + + if (vecA > 4) { + Value ha10 = bitcast(extract_element(ha, i32_val(2)), f16x2Ty); + Value ha11 = bitcast(extract_element(ha, i32_val(3)), f16x2Ty); + if (isARow) + ld(has, m, k + 4, ha10, ha11); + else + ld(has, m + 1, k, ha10, ha11); + } + }; + + unsigned numM = getNumM(shape, order[0] == 1); + for (unsigned k = 0; k < NK; k += 4) + for (unsigned m = 0; m < numM / 2; ++m) + loadA(m, k); + + SmallVector elems; + elems.reserve(has.size() * 2); + for (auto item : has) { // has is a map, the key should be ordered. + elems.push_back(item.second.first); + elems.push_back(item.second.second); + } + + Type resTy = struct_ty(SmallVector(elems.size(), f16x2Ty)); + Value res = getStructFromElements(loc, elems, rewriter, resTy); + return res; + } + + // Loading $b from smem to registers, returns a LLVM::Struct. + Value loadB(Value tensor, bool transB, const SharedMemoryObject &smemObj, + Value thread, Location loc, + ConversionPatternRewriter &rewriter) const { + // smem + auto strides = smemObj.strides; + + auto *ctx = rewriter.getContext(); + auto tensorTy = tensor.getType().cast(); + auto sharedLayout = tensorTy.getEncoding().cast(); + + SmallVector shape(tensorTy.getShape().begin(), + tensorTy.getShape().end()); + SmallVector order(sharedLayout.getOrder().begin(), + sharedLayout.getOrder().end()); + + Value smem = smemObj.getBaseBeforeSwizzle(order[0], loc, rewriter); + bool isBRow = order[0] != 0; + BParam param(isBRow); + + int vecB = sharedLayout.getVec(); + Value strideBN = isBRow ? i32_val(1) : strides[1]; + Value strideBK = isBRow ? strides[0] : i32_val(1); + Value strideB0 = isBRow ? strideBN : strideBK; + Value strideB1 = isBRow ? strideBK : strideBN; + int strideRepN = wpt[1] * fpw[1] * 8; + int strideRepK = 1; + + auto [_0, _1, offsetBN, offsetBK] = computeOffsets( + thread, false, isBRow, fpw, param.spw, param.rep, rewriter, loc); + if (transB) { + std::swap(order[0], order[1]); + std::swap(shape[0], shape[1]); + std::swap(offsetBK, offsetBN); + } + + // swizzling + int perPhaseB = sharedLayout.getPerPhase(); + int maxPhaseB = sharedLayout.getMaxPhase(); + int stepB0 = isBRow ? strideRepN : strideRepK; + int numPtrB = std::max(2 * perPhaseB * maxPhaseB / stepB0, 1); + int NK = shape[0]; + + Value offB0 = isBRow ? offsetBN : offsetBK; + Value offB1 = isBRow ? offsetBK : offsetBN; + Value phaseB = urem(udiv(offB1, i32_val(perPhaseB)), i32_val(maxPhaseB)); + Value cSwizzleOffset = smemObj.getCSwizzleOffset(order[0]); + + offB0 = add(offB0, cSwizzleOffset); + SmallVector offB(numPtrB); + for (int i = 0; i < numPtrB; ++i) { + Value offB0I = add(offB0, i32_val(i * (isBRow ? 
strideRepN : 4))); + offB0I = udiv(offB0I, i32_val(vecB)); + offB0I = xor_(offB0I, phaseB); + offB0I = mul(offB0I, i32_val(vecB)); + offB[i] = add(mul(offB0I, strideB0), mul(offB1, strideB1)); + } + + Type f16PtrTy = ptr_ty(f16_ty); + Type f16x2Ty = vec_ty(f16_ty, 2); + + SmallVector ptrB(numPtrB); + ValueTable hbs; + for (int i = 0; i < numPtrB; ++i) + ptrB[i] = gep(ptr_ty(f16_ty), smem, offB[i]); + + auto ld = [&](decltype(hbs) &vals, int m, int k, Value val0, Value val1) { + vals[{m, k}] = {val0, val1}; + }; + + auto loadB = [&](int n, int K) { + int offidx = (isBRow ? n : K / 4) % numPtrB; + Value thePtrB = ptrB[offidx]; + + int stepBN = isBRow ? n / numPtrB * numPtrB : n; + int stepBK = isBRow ? K : K / (numPtrB * vecB) * (numPtrB * vecB); + Value offset = add(mul(i32_val(stepBN * strideRepN), strideBN), + mul(i32_val(stepBK), strideBK)); + Value pb = gep(f16PtrTy, thePtrB, offset); + + Value hb = + load(bitcast(pb, ptr_ty(vec_ty(i32_ty, std::max(vecB / 2, 1)), 3))); + // record lds that needs to be moved + Value hb00 = bitcast(extract_element(hb, i32_val(0)), f16x2Ty); + Value hb01 = bitcast(extract_element(hb, i32_val(1)), f16x2Ty); + ld(hbs, n, K, hb00, hb01); + if (vecB > 4) { + Value hb10 = bitcast(extract_element(hb, i32_val(2)), f16x2Ty); + Value hb11 = bitcast(extract_element(hb, i32_val(3)), f16x2Ty); + if (isBRow) + ld(hbs, n + 1, K, hb10, hb11); + else + ld(hbs, n, K + 4, hb10, hb11); + } + }; + + unsigned numN = getNumN(shape, order[0] == 1); + for (unsigned k = 0; k < NK; k += 4) + for (unsigned n = 0; n < numN / 2; ++n) { + if (!hbs.count({n, k})) + loadB(n, k); + } + + SmallVector elems; + for (auto &item : hbs) { // has is a map, the key should be ordered. + elems.push_back(item.second.first); + elems.push_back(item.second.second); + } + Type fp16x2Ty = vec_ty(type::f16Ty(ctx), 2); + Type resTy = struct_ty(SmallVector(elems.size(), fp16x2Ty)); + Value res = getStructFromElements(loc, elems, rewriter, resTy); + return res; + } + + static ArrayRef getOrder() { return mmaOrder; } + + // Compute the offset of the matrix to load. + // Returns offsetAM, offsetAK, offsetBN, offsetBK. + // NOTE, the information M(from $a) and N(from $b) couldn't be retrieved at + // the same time in the usage in convert_layout[shared->dot_op], we leave + // the noexist info to be 0 and only use the desired argument from the + // composed result. In this way we want to retain the original code + // structure in convert_mma884 method for easier debugging. 
+ std::tuple + computeOffsets(Value threadId, bool isARow, bool isBRow, ArrayRef fpw, + ArrayRef spw, ArrayRef rep, + ConversionPatternRewriter &rewriter, Location loc) const { + auto *ctx = rewriter.getContext(); + Value _1 = i32_val(1); + Value _3 = i32_val(3); + Value _4 = i32_val(4); + Value _16 = i32_val(16); + Value _32 = i32_val(32); + + Value lane = urem(threadId, _32); + Value warp = udiv(threadId, _32); + + // warp offset + Value warp0 = urem(warp, i32_val(wpt[0])); + Value warp12 = udiv(warp, i32_val(wpt[0])); + Value warp1 = urem(warp12, i32_val(wpt[1])); + Value warpMOff = mul(warp0, i32_val(spw[0])); + Value warpNOff = mul(warp1, i32_val(spw[1])); + // Quad offset + Value quadMOff = mul(udiv(and_(lane, _16), _4), i32_val(fpw[0])); + Value quadNOff = mul(udiv(and_(lane, _16), _4), i32_val(fpw[1])); + // Pair offset + Value pairMOff = udiv(urem(lane, _16), _4); + pairMOff = urem(pairMOff, i32_val(fpw[0])); + pairMOff = mul(pairMOff, _4); + Value pairNOff = udiv(urem(lane, _16), _4); + pairNOff = udiv(pairNOff, i32_val(fpw[0])); + pairNOff = urem(pairNOff, i32_val(fpw[1])); + pairNOff = mul(pairNOff, _4); + // scale + pairMOff = mul(pairMOff, i32_val(rep[0] / 2)); + quadMOff = mul(quadMOff, i32_val(rep[0] / 2)); + pairNOff = mul(pairNOff, i32_val(rep[1] / 2)); + quadNOff = mul(quadNOff, i32_val(rep[1] / 2)); + // Quad pair offset + Value laneMOff = add(pairMOff, quadMOff); + Value laneNOff = add(pairNOff, quadNOff); + // A offset + Value offsetAM = add(warpMOff, laneMOff); + Value offsetAK = and_(lane, _3); + // B offset + Value offsetBN = add(warpNOff, laneNOff); + Value offsetBK = and_(lane, _3); + // i indices + Value offsetCM = add(and_(lane, _1), offsetAM); + if (isARow) { + offsetAM = add(offsetAM, urem(threadId, _4)); + offsetAK = i32_val(0); + } + if (!isBRow) { + offsetBN = add(offsetBN, urem(threadId, _4)); + offsetBK = i32_val(0); + } + + return std::make_tuple(offsetAM, offsetAK, offsetBN, offsetBK); + } + + // Extract values belong to $a or $b from a LLVMStruct, the shape is n0xn1. + DotOpMmaV1ConversionHelper::ValueTable + extractLoadedOperand(Value llStruct, int NK, + ConversionPatternRewriter &rewriter) const { + ValueTable rcds; + SmallVector elems = + getElementsFromStruct(llStruct.getLoc(), llStruct, rewriter); + + int offset = 0; + for (int i = 0; offset < elems.size(); ++i) { + for (int k = 0; k < NK; k += 4) { + rcds[{i, k}] = std::make_pair(elems[offset], elems[offset + 1]); + offset += 2; + } + } + + return rcds; + } + +private: + static constexpr unsigned instrShape[] = {16, 16, 4}; + static constexpr unsigned mmaOrder[] = {0, 1}; +}; + +// Helper for conversion of DotOp with mma, that is sm>=80 +struct DotOpMmaV2ConversionHelper { + enum class TensorCoreType : uint8_t { + // floating-point tensor core instr + FP32_FP16_FP16_FP32 = 0, // default + FP32_BF16_BF16_FP32, + FP32_TF32_TF32_FP32, + // integer tensor core instr + INT32_INT1_INT1_INT32, // Not implemented + INT32_INT4_INT4_INT32, // Not implemented + INT32_INT8_INT8_INT32, // Not implemented + // + NOT_APPLICABLE, + }; + + MmaEncodingAttr mmaLayout; + MLIRContext *ctx{}; + + explicit DotOpMmaV2ConversionHelper(MmaEncodingAttr mmaLayout) + : mmaLayout(mmaLayout) { + ctx = mmaLayout.getContext(); + } + + void deduceMmaType(DotOp op) const { mmaType = getMmaType(op); } + void deduceMmaType(Type operandTy) const { + mmaType = getTensorCoreTypeFromOperand(operandTy); + } + + // Get the M and N of mma instruction shape. 
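// Editorial aside (not part of the patch): what getRepMN(), defined just
// below, computes for an example 64x64 mma-v2 accumulator with
// warpsPerCTA = {4, 1}; the 16x8 instruction shape is the one returned by
// getInstrShapeMN(). The tensor shape and warp count are assumed values.
constexpr int repAlong(int dim, int warps, int instr) {
  return dim / (warps * instr) > 1 ? dim / (warps * instr) : 1; // max(.., 1)
}
static_assert(repAlong(64, 4, 16) == 1, "repM");
static_assert(repAlong(64, 1, 8) == 8, "repN");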
+ static std::tuple getInstrShapeMN() { + // According to DotOpConversionHelper::mmaInstrShape, all the M,N are + // {16,8} + return {16, 8}; + } + + static std::tuple getRepMN(const RankedTensorType &tensorTy) { + auto mmaLayout = tensorTy.getEncoding().cast(); + auto wpt = mmaLayout.getWarpsPerCTA(); + + int M = tensorTy.getShape()[0]; + int N = tensorTy.getShape()[1]; + auto [instrM, instrN] = getInstrShapeMN(); + int repM = std::max(M / (wpt[0] * instrM), 1); + int repN = std::max(N / (wpt[1] * instrN), 1); + return {repM, repN}; + } + + Type getShemPtrTy() const { + switch (mmaType) { + case TensorCoreType::FP32_FP16_FP16_FP32: + return ptr_ty(type::f16Ty(ctx), 3); + case TensorCoreType::FP32_BF16_BF16_FP32: + return ptr_ty(type::i16Ty(ctx), 3); + case TensorCoreType::FP32_TF32_TF32_FP32: + return ptr_ty(type::f32Ty(ctx), 3); + case TensorCoreType::INT32_INT8_INT8_INT32: + return ptr_ty(type::i8Ty(ctx), 3); + default: + llvm::report_fatal_error("mma16816 data type not supported"); + } + return Type{}; + } + + // The type of matrix that loaded by either a ldmatrix or composed lds. + Type getMatType() const { + Type fp32Ty = type::f32Ty(ctx); + Type fp16x2Ty = vec_ty(type::f16Ty(ctx), 2); + Type i16x2Ty = vec_ty(type::i16Ty(ctx), 2); + // floating point types + Type fp16x2Pack4Ty = + LLVM::LLVMStructType::getLiteral(ctx, SmallVector(4, fp16x2Ty)); + // LLVM 14.0 does not support bf16 type, so we use i16 instead. + Type bf16x2Pack4Ty = + LLVM::LLVMStructType::getLiteral(ctx, SmallVector(4, i16x2Ty)); + Type fp32Pack4Ty = + LLVM::LLVMStructType::getLiteral(ctx, SmallVector(4, fp32Ty)); + // integer types + Type i8x4Ty = vec_ty(type::i8Ty(ctx), 4); + Type i8x4Pack4Ty = + LLVM::LLVMStructType::getLiteral(ctx, SmallVector(4, i8x4Ty)); + + switch (mmaType) { + case TensorCoreType::FP32_FP16_FP16_FP32: + return fp16x2Pack4Ty; + case TensorCoreType::FP32_BF16_BF16_FP32: + return bf16x2Pack4Ty; + case TensorCoreType::FP32_TF32_TF32_FP32: + return fp32Pack4Ty; + case TensorCoreType::INT32_INT8_INT8_INT32: + return i8x4Pack4Ty; + default: + llvm::report_fatal_error("Unsupported mma type found"); + } + + return Type{}; + } + + Type getLoadElemTy() { + switch (mmaType) { + case TensorCoreType::FP32_FP16_FP16_FP32: + return vec_ty(type::f16Ty(ctx), 2); + case TensorCoreType::FP32_BF16_BF16_FP32: + return vec_ty(type::bf16Ty(ctx), 2); + case TensorCoreType::FP32_TF32_TF32_FP32: + return type::f32Ty(ctx); + case TensorCoreType::INT32_INT8_INT8_INT32: + return type::i32Ty(ctx); + default: + llvm::report_fatal_error("Unsupported mma type found"); + } + + return Type{}; + } + + Type getMmaRetType() const { + Type fp32Ty = type::f32Ty(ctx); + Type i32Ty = type::i32Ty(ctx); + Type fp32x4Ty = + LLVM::LLVMStructType::getLiteral(ctx, SmallVector(4, fp32Ty)); + Type i32x4Ty = + LLVM::LLVMStructType::getLiteral(ctx, SmallVector(4, i32Ty)); + switch (mmaType) { + case TensorCoreType::FP32_FP16_FP16_FP32: + return fp32x4Ty; + case TensorCoreType::FP32_BF16_BF16_FP32: + return fp32x4Ty; + case TensorCoreType::FP32_TF32_TF32_FP32: + return fp32x4Ty; + case TensorCoreType::INT32_INT8_INT8_INT32: + return i32x4Ty; + default: + llvm::report_fatal_error("Unsupported mma type found"); + } + + return Type{}; + } + + ArrayRef getMmaInstrShape() const { + assert(mmaType != TensorCoreType::NOT_APPLICABLE && + "Unknown mma type found."); + return mmaInstrShape.at(mmaType); + } + + static ArrayRef getMmaInstrShape(TensorCoreType tensorCoreType) { + assert(tensorCoreType != TensorCoreType::NOT_APPLICABLE && + "Unknown mma type 
found."); + return mmaInstrShape.at(tensorCoreType); + } + + ArrayRef getMmaMatShape() const { + assert(mmaType != TensorCoreType::NOT_APPLICABLE && + "Unknown mma type found."); + return mmaMatShape.at(mmaType); + } + + // Deduce the TensorCoreType from either $a or $b's type. + static TensorCoreType getTensorCoreTypeFromOperand(Type operandTy) { + auto tensorTy = operandTy.cast(); + auto elemTy = tensorTy.getElementType(); + if (elemTy.isF16()) + return TensorCoreType::FP32_FP16_FP16_FP32; + if (elemTy.isF32()) + return TensorCoreType::FP32_TF32_TF32_FP32; + if (elemTy.isBF16()) + return TensorCoreType::FP32_BF16_BF16_FP32; + if (elemTy.isInteger(8)) + return TensorCoreType::INT32_INT8_INT8_INT32; + return TensorCoreType::NOT_APPLICABLE; + } + + int getVec() const { + assert(mmaType != TensorCoreType::NOT_APPLICABLE && + "Unknown mma type found."); + return mmaInstrVec.at(mmaType); + } + + StringRef getMmaInstr() const { + assert(mmaType != TensorCoreType::NOT_APPLICABLE && + "Unknown mma type found."); + return mmaInstrPtx.at(mmaType); + } + + static TensorCoreType getMmaType(triton::DotOp op) { + Value A = op.a(); + Value B = op.b(); + auto aTy = A.getType().cast(); + auto bTy = B.getType().cast(); + // d = a*b + c + auto dTy = op.d().getType().cast(); + + if (dTy.getElementType().isF32()) { + if (aTy.getElementType().isF16() && bTy.getElementType().isF16()) + return TensorCoreType::FP32_FP16_FP16_FP32; + if (aTy.getElementType().isBF16() && bTy.getElementType().isBF16()) + return TensorCoreType::FP32_BF16_BF16_FP32; + if (aTy.getElementType().isF32() && bTy.getElementType().isF32() && + op.allowTF32()) + return TensorCoreType::FP32_TF32_TF32_FP32; + } else if (dTy.getElementType().isInteger(32)) { + if (aTy.getElementType().isInteger(8) && + bTy.getElementType().isInteger(8)) + return TensorCoreType::INT32_INT8_INT8_INT32; + } + + return TensorCoreType::NOT_APPLICABLE; + } + +private: + mutable TensorCoreType mmaType{TensorCoreType::NOT_APPLICABLE}; + + // Used on nvidia GPUs mma layout .version == 2 + // Refer to + // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-storage + // for more details. + inline static const std::map> + mmaInstrShape = { + {TensorCoreType::FP32_FP16_FP16_FP32, {16, 8, 16}}, + {TensorCoreType::FP32_BF16_BF16_FP32, {16, 8, 16}}, + {TensorCoreType::FP32_TF32_TF32_FP32, {16, 8, 8}}, + + {TensorCoreType::INT32_INT1_INT1_INT32, {16, 8, 256}}, + {TensorCoreType::INT32_INT4_INT4_INT32, {16, 8, 64}}, + {TensorCoreType::INT32_INT8_INT8_INT32, {16, 8, 32}}, + }; + + // shape of matrices loaded by ldmatrix (m-n-k, for mxk & kxn matrices) + // Refer to + // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-ldmatrix + // for more details. + inline static const std::map> + mmaMatShape = { + {TensorCoreType::FP32_FP16_FP16_FP32, {8, 8, 8}}, + {TensorCoreType::FP32_BF16_BF16_FP32, {8, 8, 8}}, + {TensorCoreType::FP32_TF32_TF32_FP32, {8, 8, 4}}, + + {TensorCoreType::INT32_INT1_INT1_INT32, {8, 8, 64}}, + {TensorCoreType::INT32_INT4_INT4_INT32, {8, 8, 32}}, + {TensorCoreType::INT32_INT8_INT8_INT32, {8, 8, 16}}, + }; + + // Supported mma instruction in PTX. + // Refer to + // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#warp-level-matrix-instructions-for-mma + // for more details. 
+ inline static const std::map mmaInstrPtx = { + {TensorCoreType::FP32_FP16_FP16_FP32, + "mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32"}, + {TensorCoreType::FP32_BF16_BF16_FP32, + "mma.sync.aligned.m16n8k16.row.col.f32.bf16.bf16.f32"}, + {TensorCoreType::FP32_TF32_TF32_FP32, + "mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32"}, + + {TensorCoreType::INT32_INT1_INT1_INT32, + "mma.sync.aligned.m16n8k256.row.col.s32.b1.b1.s32.xor.popc"}, + {TensorCoreType::INT32_INT4_INT4_INT32, + "mma.sync.aligned.m16n8k64.row.col.satfinite.s32.s4.s4.s32"}, + {TensorCoreType::INT32_INT8_INT8_INT32, + "mma.sync.aligned.m16n8k32.row.col.satfinite.s32.s8.s8.s32"}, + }; + + // vector length per ldmatrix (16*8/element_size_in_bits) + inline static const std::map mmaInstrVec = { + {TensorCoreType::FP32_FP16_FP16_FP32, 8}, + {TensorCoreType::FP32_BF16_BF16_FP32, 8}, + {TensorCoreType::FP32_TF32_TF32_FP32, 4}, + + {TensorCoreType::INT32_INT1_INT1_INT32, 128}, + {TensorCoreType::INT32_INT4_INT4_INT32, 32}, + {TensorCoreType::INT32_INT8_INT8_INT32, 16}, + }; +}; + +// Data loader for mma.16816 instruction. +class MMA16816SmemLoader { +public: + MMA16816SmemLoader(int wpt, ArrayRef order, uint32_t kOrder, + ArrayRef smemStrides, ArrayRef tileShape, + ArrayRef instrShape, ArrayRef matShape, + int perPhase, int maxPhase, int elemBytes, + ConversionPatternRewriter &rewriter, + TypeConverter *typeConverter, const Location &loc) + : order(order.begin(), order.end()), kOrder(kOrder), + tileShape(tileShape.begin(), tileShape.end()), + instrShape(instrShape.begin(), instrShape.end()), + matShape(matShape.begin(), matShape.end()), perPhase(perPhase), + maxPhase(maxPhase), elemBytes(elemBytes), rewriter(rewriter), loc(loc), + ctx(rewriter.getContext()) { + cMatShape = matShape[order[0]]; + sMatShape = matShape[order[1]]; + + sStride = smemStrides[order[1]]; + + // rule: k must be the fast-changing axis. + needTrans = kOrder != order[0]; + canUseLdmatrix = elemBytes == 2 || (!needTrans); // b16 + + if (canUseLdmatrix) { + // Each CTA, the warps is arranged as [1xwpt] if not transposed, + // otherwise [wptx1], and each warp will perform a mma. + numPtrs = + tileShape[order[0]] / (needTrans ? wpt : 1) / instrShape[order[0]]; + } else { + numPtrs = tileShape[order[0]] / wpt / matShape[order[0]]; + } + numPtrs = std::max(numPtrs, 2); + + // Special rule for i8/u8, 4 ptrs for each matrix + if (!canUseLdmatrix && elemBytes == 1) + numPtrs *= 4; + + int loadStrideInMat[2]; + loadStrideInMat[kOrder] = + 2; // instrShape[kOrder] / matShape[kOrder], always 2 + loadStrideInMat[kOrder ^ 1] = + wpt * (instrShape[kOrder ^ 1] / matShape[kOrder ^ 1]); + + pLoadStrideInMat = loadStrideInMat[order[0]]; + sMatStride = + loadStrideInMat[order[1]] / (instrShape[order[1]] / matShape[order[1]]); + + // Each matArr contains warpOffStride matrices. + matArrStride = kOrder == 1 ? 
1 : wpt; + warpOffStride = instrShape[kOrder ^ 1] / matShape[kOrder ^ 1]; + } + + // lane = thread % 32 + // warpOff = (thread/32) % wpt(0) + llvm::SmallVector computeOffsets(Value warpOff, Value lane, + Value cSwizzleOffset) { + if (canUseLdmatrix) + return computeLdmatrixMatOffs(warpOff, lane, cSwizzleOffset); + else if (elemBytes == 4 && needTrans) + return computeB32MatOffs(warpOff, lane, cSwizzleOffset); + else if (elemBytes == 1 && needTrans) + return computeB8MatOffs(warpOff, lane, cSwizzleOffset); + else + llvm::report_fatal_error("Invalid smem load config"); + + return {}; + } + + int getNumPtrs() const { return numPtrs; } + + // Compute the offset to the matrix this thread(indexed by warpOff and lane) + // mapped to. + SmallVector computeLdmatrixMatOffs(Value warpId, Value lane, + Value cSwizzleOffset) { + // 4x4 matrices + Value c = urem(lane, i32_val(8)); + Value s = udiv(lane, i32_val(8)); // sub-warp-id + + // Decompose s => s_0, s_1, that is the coordinate in 2x2 matrices in a + // warp + Value s0 = urem(s, i32_val(2)); + Value s1 = udiv(s, i32_val(2)); + + // We use different orders for a and b for better performance. + Value kMatArr = kOrder == 1 ? s1 : s0; + Value nkMatArr = kOrder == 1 ? s0 : s1; + + // matrix coordinate inside a CTA, the matrix layout is [2x2wpt] for A and + // [2wptx2] for B. e.g. Setting wpt=3, The data layout for A(kOrder=1) is + // |0 0 1 1 2 2| -> 0,1,2 are the warpids + // |0 0 1 1 2 2| + // + // for B(kOrder=0) is + // |0 0| -> 0,1,2 are the warpids + // |1 1| + // |2 2| + // |0 0| + // |1 1| + // |2 2| + // Note, for each warp, it handles a 2x2 matrices, that is the coordinate + // address (s0,s1) annotates. + + Value matOff[2]; + matOff[kOrder ^ 1] = add( + mul(warpId, i32_val(warpOffStride)), // warp offset + mul(nkMatArr, i32_val(matArrStride))); // matrix offset inside a warp + matOff[kOrder] = kMatArr; + + // Physical offset (before swizzling) + Value cMatOff = matOff[order[0]]; + Value sMatOff = matOff[order[1]]; + Value cSwizzleMatOff = udiv(cSwizzleOffset, i32_val(cMatShape)); + cMatOff = add(cMatOff, cSwizzleMatOff); + + // row offset inside a matrix, each matrix has 8 rows. + Value sOffInMat = c; + + SmallVector offs(numPtrs); + Value phase = urem(udiv(sOffInMat, i32_val(perPhase)), i32_val(maxPhase)); + Value sOff = add(sOffInMat, mul(sMatOff, i32_val(sMatShape))); + for (int i = 0; i < numPtrs; ++i) { + Value cMatOffI = add(cMatOff, i32_val(i * pLoadStrideInMat)); + cMatOffI = xor_(cMatOffI, phase); + offs[i] = add(mul(cMatOffI, i32_val(cMatShape)), mul(sOff, sStride)); + } + + return offs; + } + + // Compute 32-bit matrix offsets. + SmallVector computeB32MatOffs(Value warpOff, Value lane, + Value cSwizzleOffset) { + assert(needTrans && "Only used in transpose mode."); + // Load tf32 matrices with lds32 + Value cOffInMat = udiv(lane, i32_val(4)); + Value sOffInMat = urem(lane, i32_val(4)); + + Value phase = urem(udiv(sOffInMat, i32_val(perPhase)), i32_val(maxPhase)); + SmallVector offs(numPtrs); + + for (int mat = 0; mat < 4; ++mat) { // Load 4 mats each time + int kMatArrInt = kOrder == 1 ? mat / 2 : mat % 2; + int nkMatArrInt = kOrder == 1 ? 
mat % 2 : mat / 2; + if (kMatArrInt > 0) // we don't need pointers for k + continue; + Value kMatArr = i32_val(kMatArrInt); + Value nkMatArr = i32_val(nkMatArrInt); + + Value cMatOff = add(mul(warpOff, i32_val(warpOffStride)), + mul(nkMatArr, i32_val(matArrStride))); + Value cSwizzleMatOff = udiv(cSwizzleOffset, i32_val(cMatShape)); + cMatOff = add(cMatOff, cSwizzleMatOff); + + Value sMatOff = kMatArr; + Value sOff = add(sOffInMat, mul(sMatOff, i32_val(sMatShape))); + // FIXME: (kOrder == 1?) is really dirty hack + for (int i = 0; i < numPtrs / 2; ++i) { + Value cMatOffI = + add(cMatOff, i32_val(i * pLoadStrideInMat * (kOrder == 1 ? 1 : 2))); + cMatOffI = xor_(cMatOffI, phase); + Value cOff = add(cOffInMat, mul(cMatOffI, i32_val(cMatShape))); + cOff = urem(cOff, i32_val(tileShape[order[0]])); + sOff = urem(sOff, i32_val(tileShape[order[1]])); + offs[2 * i + nkMatArrInt] = add(cOff, mul(sOff, sStride)); + } + } + return offs; + } + + // compute 8-bit matrix offset. + SmallVector computeB8MatOffs(Value warpOff, Value lane, + Value cSwizzleOffset) { + assert(needTrans && "Only used in transpose mode."); + Value cOffInMat = udiv(lane, i32_val(4)); + Value sOffInMat = + mul(urem(lane, i32_val(4)), i32_val(4)); // each thread load 4 cols + + SmallVector offs(numPtrs); + for (int mat = 0; mat < 4; ++mat) { + int kMatArrInt = kOrder == 1 ? mat / 2 : mat % 2; + int nkMatArrInt = kOrder == 1 ? mat % 2 : mat / 2; + if (kMatArrInt > 0) // we don't need pointers for k + continue; + Value kMatArr = i32_val(kMatArrInt); + Value nkMatArr = i32_val(nkMatArrInt); + + Value cMatOff = add(mul(warpOff, i32_val(warpOffStride)), + mul(nkMatArr, i32_val(matArrStride))); + Value sMatOff = kMatArr; + + for (int loadx4Off = 0; loadx4Off < numPtrs / 8; ++loadx4Off) { + for (int elemOff = 0; elemOff < 4; ++elemOff) { + int ptrOff = loadx4Off * 8 + nkMatArrInt * 4 + elemOff; + Value cMatOffI = add(cMatOff, i32_val(loadx4Off * pLoadStrideInMat * + (kOrder == 1 ? 1 : 2))); + Value sOffInMatElem = add(sOffInMat, i32_val(elemOff)); + + // disable swizzling ... + + Value cOff = add(cOffInMat, mul(cMatOffI, i32_val(cMatShape))); + Value sOff = add(sOffInMatElem, mul(sMatOff, i32_val(sMatShape))); + // To prevent out-of-bound access when tile is too small. + cOff = urem(cOff, i32_val(tileShape[order[0]])); + sOff = urem(sOff, i32_val(tileShape[order[1]])); + offs[ptrOff] = add(cOff, mul(sOff, sStride)); + } + } + } + return offs; + } + + // Load 4 matrices and returns 4 vec<2> elements. + std::tuple + loadX4(int mat0, int mat1, ArrayRef offs, ArrayRef ptrs, + Type ldmatrixRetTy, Type shemPtrTy) const { + assert(mat0 % 2 == 0 && mat1 % 2 == 0 && + "smem matrix load must be aligned"); + int matIdx[2] = {mat0, mat1}; + + int ptrIdx{-1}; + + if (canUseLdmatrix) + ptrIdx = matIdx[order[0]] / (instrShape[order[0]] / matShape[order[0]]); + else if (elemBytes == 4 && needTrans) + ptrIdx = matIdx[order[0]]; + else if (elemBytes == 1 && needTrans) + ptrIdx = matIdx[order[0]] * 4; + else + llvm::report_fatal_error("unsupported mma type found"); + + // The main difference with the original triton code is we removed the + // prefetch-related logic here for the upstream optimizer phase should + // take care with it, and that is transparent in dot conversion. 
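// Editorial aside (not part of the patch): the pointer-index selection a few
// lines above, restated with plain integers. instrShapeC / matShapeC stand
// for instrShape[order[0]] / matShape[order[0]]; the non-ldmatrix branches
// are only reached when needTrans is true, as in the surrounding code.
static int smemLoadPtrIdx(int matIdxC, bool canUseLdmatrix, int elemBytes,
                          int instrShapeC, int matShapeC) {
  if (canUseLdmatrix)
    return matIdxC / (instrShapeC / matShapeC); // one pointer per instruction
  if (elemBytes == 4)
    return matIdxC;     // lds.32 path for transposed tf32
  if (elemBytes == 1)
    return matIdxC * 4; // int8 path keeps 4 pointers per matrix
  return -1;            // unsupported combination (the code reports an error)
}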
+ auto getPtr = [&](int idx) { return ptrs[idx]; }; + + Value ptr = getPtr(ptrIdx); + + if (canUseLdmatrix) { + Value sOffset = + mul(i32_val(matIdx[order[1]] * sMatStride * sMatShape), sStride); + Value sOffsetPtr = gep(shemPtrTy, ptr, sOffset); + + PTXBuilder builder; + // ldmatrix.m8n8.x4 returns 4x2xfp16(that is 4xb32) elements for a + // thread. + auto resArgs = builder.newListOperand(4, "=r"); + auto addrArg = builder.newAddrOperand(sOffsetPtr, "r"); + + auto ldmatrix = builder.create("ldmatrix.sync.aligned.m8n8.x4") + ->o("trans", needTrans /*predicate*/) + .o("shared.b16"); + ldmatrix(resArgs, addrArg); + + // The result type is 4xi32, each i32 is composed of 2xf16 + // elements(adjacent two columns in a row) + Value resV4 = builder.launch(rewriter, loc, ldmatrixRetTy); + + auto getIntAttr = [&](int v) { + return ArrayAttr::get(ctx, {IntegerAttr::get(i32_ty, v)}); + }; + + // The struct should have exactly the same element types. + Type elemType = resV4.getType().cast().getBody()[0]; + + return {extract_val(elemType, resV4, getIntAttr(0)), + extract_val(elemType, resV4, getIntAttr(1)), + extract_val(elemType, resV4, getIntAttr(2)), + extract_val(elemType, resV4, getIntAttr(3))}; + } else if (elemBytes == 4 && + needTrans) { // Use lds.32 to load tf32 matrices + Value ptr2 = getPtr(ptrIdx + 1); + assert(sMatStride == 1); + int sOffsetElem = matIdx[order[1]] * (sMatStride * sMatShape); + Value sOffsetElemVal = mul(i32_val(sOffsetElem), sStride); + int sOffsetArrElem = sMatStride * sMatShape; + Value sOffsetArrElemVal = + add(sOffsetElemVal, mul(i32_val(sOffsetArrElem), sStride)); + + Value elems[4]; + Type elemTy = type::f32Ty(ctx); + Type elemPtrTy = ptr_ty(elemTy); + if (kOrder == 1) { + elems[0] = load(gep(elemPtrTy, ptr, sOffsetElemVal)); + elems[1] = load(gep(elemPtrTy, ptr2, sOffsetElemVal)); + elems[2] = load(gep(elemPtrTy, ptr, sOffsetArrElemVal)); + elems[3] = load(gep(elemPtrTy, ptr2, sOffsetArrElemVal)); + } else { + elems[0] = load(gep(elemPtrTy, ptr, sOffsetElemVal)); + elems[2] = load(gep(elemPtrTy, ptr2, sOffsetElemVal)); + elems[1] = load(gep(elemPtrTy, ptr, sOffsetArrElemVal)); + elems[3] = load(gep(elemPtrTy, ptr2, sOffsetArrElemVal)); + } + return {elems[0], elems[1], elems[2], elems[3]}; + + } else if (elemBytes == 1 && needTrans) { // work with int8 + std::array, 2> ptrs; + ptrs[0] = { + getPtr(ptrIdx), + getPtr(ptrIdx + 1), + getPtr(ptrIdx + 2), + getPtr(ptrIdx + 3), + }; + + ptrs[1] = { + getPtr(ptrIdx + 4), + getPtr(ptrIdx + 5), + getPtr(ptrIdx + 6), + getPtr(ptrIdx + 7), + }; + + assert(sMatStride == 1); + int sOffsetElem = matIdx[order[1]] * (sMatStride * sMatShape); + Value sOffsetElemVal = mul(i32_val(sOffsetElem), sStride); + int sOffsetArrElem = 1 * (sMatStride * sMatShape); + Value sOffsetArrElemVal = + add(sOffsetElemVal, mul(i32_val(sOffsetArrElem), sStride)); + + std::array i8v4Elems; + std::array i32Elems; + i8v4Elems.fill( + rewriter.create(loc, vec_ty(type::i8Ty(ctx), 4))); + + Value i8Elems[4][4]; + Type elemTy = type::i8Ty(ctx); + Type elemPtrTy = ptr_ty(elemTy); + Type i8x4Ty = vec_ty(type::i8Ty(ctx), 4); + if (kOrder == 1) { + for (int i = 0; i < 2; ++i) + for (int j = 0; j < 4; ++j) + i8Elems[i][j] = load(gep(elemPtrTy, ptrs[i][j], sOffsetElemVal)); + + for (int i = 2; i < 4; ++i) + for (int j = 0; j < 4; ++j) + i8Elems[i][j] = + load(gep(elemPtrTy, ptrs[i - 2][j], sOffsetArrElemVal)); + + for (int m = 0; m < 4; ++m) { + for (int e = 0; e < 4; ++e) + i8v4Elems[m] = insert_element(i8v4Elems[m].getType(), i8v4Elems[m], + i8Elems[m][e], 
i32_val(e)); + i32Elems[m] = bitcast(i8v4Elems[m], i8x4Ty); + } + } else { // k first + for (int j = 0; j < 4; ++j) + i8Elems[0][j] = load(gep(elemPtrTy, ptrs[0][j], sOffsetElemVal)); + for (int j = 0; j < 4; ++j) + i8Elems[2][j] = load(gep(elemPtrTy, ptrs[1][j], sOffsetElemVal)); + for (int j = 0; j < 4; ++j) + i8Elems[1][j] = load(gep(elemPtrTy, ptrs[0][j], sOffsetArrElemVal)); + for (int j = 0; j < 4; ++j) + i8Elems[3][j] = load(gep(elemPtrTy, ptrs[1][j], sOffsetArrElemVal)); + + for (int m = 0; m < 4; ++m) { + for (int e = 0; e < 4; ++e) + i8v4Elems[m] = insert_element(i8v4Elems[m].getType(), i8v4Elems[m], + i8Elems[m][e], i32_val(e)); + i32Elems[m] = bitcast(i8v4Elems[m], i8x4Ty); + } + } + + return {i32Elems[0], i32Elems[1], i32Elems[2], i32Elems[3]}; + } + + assert(false && "Invalid smem load"); + return {Value{}, Value{}, Value{}, Value{}}; + } + +private: + SmallVector order; + int kOrder; + SmallVector tileShape; + SmallVector instrShape; + SmallVector matShape; + int perPhase; + int maxPhase; + int elemBytes; + ConversionPatternRewriter &rewriter; + const Location &loc; + MLIRContext *ctx{}; + + int cMatShape; + int sMatShape; + + Value sStride; + + bool needTrans; + bool canUseLdmatrix; + + int numPtrs; + + int pLoadStrideInMat; + int sMatStride; + + int matArrStride; + int warpOffStride; +}; + +// This class helps to adapt the existing DotOpConversion to the latest +// DotOpOperand layout design. It decouples the exising implementation to two +// parts: +// 1. loading the specific operand matrix(for $a, $b, $c) from smem +// 2. passing the loaded value and perform the mma codegen +struct MMA16816ConversionHelper { + MmaEncodingAttr mmaLayout; + ArrayRef wpt; + SmallVector properWpt; + + Value thread, lane, warp; + + DotOpMmaV2ConversionHelper helper; + ConversionPatternRewriter &rewriter; + TypeConverter *typeConverter; + Location loc; + MLIRContext *ctx{}; + + using ValueTable = std::map, Value>; + + // dotOperand: type of either one operand of dotOp. + MMA16816ConversionHelper(Type dotOperand, MmaEncodingAttr mmaLayout, + Value thread, ConversionPatternRewriter &rewriter, + TypeConverter *typeConverter, Location loc) + : mmaLayout(mmaLayout), thread(thread), helper(mmaLayout), + rewriter(rewriter), typeConverter(typeConverter), loc(loc), + ctx(mmaLayout.getContext()), wpt(mmaLayout.getWarpsPerCTA()) { + helper.deduceMmaType(dotOperand); + + Value _32 = i32_val(32); + lane = urem(thread, _32); + warp = udiv(thread, _32); + } + + // Get a warpId for M axis. + Value getWarpM(int M) const { + auto matShape = helper.getMmaMatShape(); + return urem(urem(warp, i32_val(wpt[0])), i32_val(M / matShape[0])); + } + + // Get a warpId for N axis. + Value getWarpN(int N) const { + auto matShape = helper.getMmaMatShape(); + Value warpMN = udiv(warp, i32_val(wpt[0])); + return urem(urem(warpMN, i32_val(wpt[1])), i32_val(N / matShape[1])); + } + + // Get the mmaInstrShape deducing either from $a or $b. + std::tuple getMmaInstrShape(Type operand) const { + helper.deduceMmaType(operand); + auto mmaInstrShape = helper.getMmaInstrShape(); + int mmaInstrM = mmaInstrShape[0]; + int mmaInstrN = mmaInstrShape[1]; + int mmaInstrK = mmaInstrShape[2]; + return std::make_tuple(mmaInstrM, mmaInstrN, mmaInstrK); + } + + // Get the mmaMatShape deducing either from $a or $b. 
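As a side note to the warp-id helpers above (getWarpM/getWarpN), and before the shape helpers that follow, a standalone sketch of how a linear thread id is decomposed into a lane and per-axis warp coordinates. warpsPerCTA, the tile shape, and the 8x8 sub-tile size are assumed values chosen only to make the arithmetic concrete.

#include <cstdio>

int main() {
  const int wpt[2] = {2, 4};      // warpsPerCTA along M and N (assumed)
  const int M = 64, N = 128;      // operand tile shape (assumed)
  const int matShape[2] = {8, 8}; // 8x8 ldmatrix sub-tiles

  for (int thread = 0; thread < 32 * wpt[0] * wpt[1]; thread += 37) {
    int lane = thread % 32;
    int warp = thread / 32;
    int warpM = (warp % wpt[0]) % (M / matShape[0]);
    int warpN = ((warp / wpt[0]) % wpt[1]) % (N / matShape[1]);
    std::printf("thread=%3d -> warp=%d lane=%2d warpM=%d warpN=%d\n",
                thread, warp, lane, warpM, warpN);
  }
  return 0;
}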
+ std::tuple getMmaMatShape(Type operand) const { + helper.deduceMmaType(operand); + auto matShape = helper.getMmaMatShape(); + int matShapeM = matShape[0]; + int matShapeN = matShape[1]; + int matShapeK = matShape[2]; + return std::make_tuple(matShapeM, matShapeN, matShapeK); + } + + // \param operand is either $a or $b's type. + inline int getNumRepM(Type operand, int M) const { + return getNumRepM(operand, M, wpt[0]); + } + + // \param operand is either $a or $b's type. + inline int getNumRepN(Type operand, int N) const { + return getNumRepN(operand, N, wpt[1]); + } + + // \param operand is either $a or $b's type. + inline int getNumRepK(Type operand, int K) const { + return getNumRepK_(operand, K); + } + + static int getNumRepM(Type operand, int M, int wpt) { + auto tensorCoreType = + DotOpMmaV2ConversionHelper::getTensorCoreTypeFromOperand(operand); + int mmaInstrM = + DotOpMmaV2ConversionHelper::getMmaInstrShape(tensorCoreType)[0]; + return std::max(M / (wpt * mmaInstrM), 1); + } + + static int getNumRepN(Type operand, int N, int wpt) { + auto tensorCoreType = + DotOpMmaV2ConversionHelper::getTensorCoreTypeFromOperand(operand); + int mmaInstrN = + DotOpMmaV2ConversionHelper::getMmaInstrShape(tensorCoreType)[1]; + return std::max(N / (wpt * mmaInstrN), 1); + } + + static int getNumRepK_(Type operand, int K) { + auto tensorCoreType = + DotOpMmaV2ConversionHelper::getTensorCoreTypeFromOperand(operand); + int mmaInstrK = + DotOpMmaV2ConversionHelper::getMmaInstrShape(tensorCoreType)[2]; + return std::max(K / mmaInstrK, 1); + } + + // Get number of elements per thread for $a operand. + static size_t getANumElemsPerThread(RankedTensorType operand, int wpt) { + auto shape = operand.getShape(); + int repM = getNumRepM(operand, shape[0], wpt); + int repK = getNumRepK_(operand, shape[1]); + return 4 * repM * repK; + } + + // Get number of elements per thread for $b operand. + static size_t getBNumElemsPerThread(RankedTensorType operand, int wpt) { + auto shape = operand.getShape(); + int repK = getNumRepK_(operand, shape[0]); + int repN = getNumRepN(operand, shape[1], wpt); + return 4 * std::max(repN / 2, 1) * repK; + } + + // Loading $a from smem to registers, returns a LLVM::Struct. + Value loadA(Value tensor, const SharedMemoryObject &smemObj) const { + auto aTensorTy = tensor.getType().cast(); + + SmallVector shape(aTensorTy.getShape().begin(), + aTensorTy.getShape().end()); + + ValueTable ha; + std::function loadFn; + auto [matShapeM, matShapeN, matShapeK] = getMmaMatShape(aTensorTy); + auto [mmaInstrM, mmaInstrN, mmaInstrK] = getMmaInstrShape(aTensorTy); + + int numRepM = getNumRepM(aTensorTy, shape[0]); + int numRepK = getNumRepK(aTensorTy, shape[1]); + + if (aTensorTy.getEncoding().isa()) { + Value warpM = getWarpM(shape[0]); + // load from smem + // we use ldmatrix.x4 so each warp processes 16x16 elements. + int wpt = std::min(mmaLayout.getWarpsPerCTA()[0], shape[0] / 16); + loadFn = + getLoadMatrixFn(tensor, smemObj, mmaLayout, wpt /*wpt*/, 1 /*kOrder*/, + {mmaInstrM, mmaInstrK} /*instrShape*/, + {matShapeM, matShapeK} /*matShape*/, warpM /*warpId*/, + ha /*vals*/, true /*isA*/); + } else if (aTensorTy.getEncoding().isa()) { + // load from registers, used in gemm fuse + // TODO(Superjomn) Port the logic. + assert(false && "Loading A from register is not supported yet."); + } else { + assert(false && "A's layout is not supported."); + } + + // step1. Perform loading. + for (int m = 0; m < numRepM; ++m) + for (int k = 0; k < numRepK; ++k) + loadFn(2 * m, 2 * k); + + // step2. 
Format the values to LLVM::Struct to passing to mma codegen. + return composeValuesToDotOperandLayoutStruct(ha, numRepM, numRepK); + } + + // Loading $b from smem to registers, returns a LLVM::Struct. + Value loadB(Value tensor, const SharedMemoryObject &smemObj) { + ValueTable hb; + auto tensorTy = tensor.getType().cast(); + + SmallVector shape(tensorTy.getShape().begin(), + tensorTy.getShape().end()); + + // TODO[Superjomn]: transB cannot be accessed in ConvertLayoutOp. + bool transB = false; + if (transB) { + std::swap(shape[0], shape[1]); + } + + auto [matShapeM, matShapeN, matShapeK] = getMmaMatShape(tensorTy); + auto [mmaInstrM, mmaInstrN, mmaInstrK] = getMmaInstrShape(tensorTy); + int numRepK = getNumRepK(tensorTy, shape[0]); + int numRepN = getNumRepN(tensorTy, shape[1]); + + Value warpN = getWarpN(shape[1]); + // we use ldmatrix.x4 so each warp processes 16x16 elements. + int wpt = std::min(mmaLayout.getWarpsPerCTA()[1], shape[1] / 16); + auto loadFn = + getLoadMatrixFn(tensor, smemObj, mmaLayout, wpt /*wpt*/, 0 /*kOrder*/, + {mmaInstrK, mmaInstrN} /*instrShape*/, + {matShapeK, matShapeN} /*matShape*/, warpN /*warpId*/, + hb /*vals*/, false /*isA*/); + + for (int n = 0; n < std::max(numRepN / 2, 1); ++n) { + for (int k = 0; k < numRepK; ++k) + loadFn(2 * n, 2 * k); + } + + Value result = composeValuesToDotOperandLayoutStruct( + hb, std::max(numRepN / 2, 1), numRepK); + return result; + } + + // Loading $c to registers, returns a Value. + Value loadC(Value tensor, Value llTensor) const { + auto tensorTy = tensor.getType().cast(); + auto [repM, repN] = DotOpMmaV2ConversionHelper::getRepMN(tensorTy); + size_t fcSize = 4 * repM * repN; + + assert(tensorTy.getEncoding().isa() && + "Currently, we only support $c with a mma layout."); + // Load a normal C tensor with mma layout, that should be a + // LLVM::struct with fcSize elements. + auto structTy = llTensor.getType().cast(); + assert(structTy.getBody().size() == fcSize && + "DotOp's $c operand should pass the same number of values as $d in " + "mma layout."); + return llTensor; + } + + // Conduct the Dot conversion. + // \param a, \param b, \param c and \param d are DotOp operands. + // \param loadedA, \param loadedB, \param loadedC, all of them are result of + // loading. 
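Before the conversion routine that follows, a standalone sketch of the accumulator indexing it relies on: each mma.m16n8k16 call reads and writes four f32 slots at fc[m * colsPerThread + 4 * n + i], with the k loop outermost so the same accumulators are reused across the K repetitions. The repetition counts below are hypothetical; only the index formula and loop order mirror the code that follows.

#include <cstdio>

int main() {
  const int numRepM = 2, numRepN = 4, numRepK = 2; // assumed repetition counts
  const int colsPerThread = numRepN * 2;

  for (int k = 0; k < numRepK; ++k)
    for (int m = 0; m < numRepM; ++m)
      for (int n = 0; n < numRepN; ++n) {
        // callMma(2 * m, n, 2 * k) updates four accumulator slots:
        for (int i = 0; i < 4; ++i)
          std::printf("mma(m=%d, n=%d, k=%d) -> fc[%d]\n",
                      2 * m, n, 2 * k, (2 * m) * colsPerThread + 4 * n + i);
      }
  return 0;
}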
+ LogicalResult convertDot(Value a, Value b, Value c, Value d, Value loadedA, + Value loadedB, Value loadedC, DotOp op, + DotOpAdaptor adaptor) const { + helper.deduceMmaType(op); + + auto aTensorTy = a.getType().cast(); + auto dTensorTy = d.getType().cast(); + + SmallVector aShape(aTensorTy.getShape().begin(), + aTensorTy.getShape().end()); + + auto dShape = dTensorTy.getShape(); + + // shape / shape_per_cta + int numRepM = getNumRepM(aTensorTy, dShape[0]); + int numRepN = getNumRepN(aTensorTy, dShape[1]); + int numRepK = getNumRepK(aTensorTy, aShape[1]); + + ValueTable ha = + getValuesFromDotOperandLayoutStruct(loadedA, numRepM, numRepK); + ValueTable hb = getValuesFromDotOperandLayoutStruct( + loadedB, std::max(numRepN / 2, 1), numRepK); + auto fc = getElementsFromStruct(loc, loadedC, rewriter); + + auto callMma = [&](unsigned m, unsigned n, unsigned k) { + unsigned colsPerThread = numRepN * 2; + PTXBuilder builder; + auto &mma = *builder.create(helper.getMmaInstr().str()); + auto retArgs = builder.newListOperand(4, "=r"); + auto aArgs = builder.newListOperand({ + {ha[{m, k}], "r"}, + {ha[{m + 1, k}], "r"}, + {ha[{m, k + 1}], "r"}, + {ha[{m + 1, k + 1}], "r"}, + }); + auto bArgs = + builder.newListOperand({{hb[{n, k}], "r"}, {hb[{n, k + 1}], "r"}}); + auto cArgs = builder.newListOperand(); + for (int i = 0; i < 4; ++i) { + cArgs->listAppend(builder.newOperand(fc[m * colsPerThread + 4 * n + i], + std::to_string(i))); + // reuse the output registers + } + + mma(retArgs, aArgs, bArgs, cArgs); + Value mmaOut = builder.launch(rewriter, loc, helper.getMmaRetType()); + + auto getIntAttr = [&](int v) { + return ArrayAttr::get(ctx, {IntegerAttr::get(i32_ty, v)}); + }; + + Type elemTy = mmaOut.getType().cast().getBody()[0]; + for (int i = 0; i < 4; ++i) + fc[m * colsPerThread + 4 * n + i] = + extract_val(elemTy, mmaOut, getIntAttr(i)); + }; + + for (int k = 0; k < numRepK; ++k) + for (int m = 0; m < numRepM; ++m) + for (int n = 0; n < numRepN; ++n) + callMma(2 * m, n, 2 * k); + + Type resElemTy = dTensorTy.getElementType(); + + for (auto &elem : fc) { + elem = bitcast(elem, resElemTy); + } + + // replace with new packed result + Type structTy = LLVM::LLVMStructType::getLiteral( + ctx, SmallVector(fc.size(), resElemTy)); + Value res = getStructFromElements(loc, fc, rewriter, structTy); + rewriter.replaceOp(op, res); + + return success(); + } + +private: + std::function + getLoadMatrixFn(Value tensor, const SharedMemoryObject &smemObj, + MmaEncodingAttr mmaLayout, int wpt, uint32_t kOrder, + SmallVector instrShape, SmallVector matShape, + Value warpId, ValueTable &vals, bool isA) const { + auto tensorTy = tensor.getType().cast(); + // We assumes that the input operand of Dot should be from shared layout. + // TODO(Superjomn) Consider other layouts if needed later. + auto sharedLayout = tensorTy.getEncoding().cast(); + const int perPhase = sharedLayout.getPerPhase(); + const int maxPhase = sharedLayout.getMaxPhase(); + const int elemBytes = tensorTy.getElementTypeBitWidth() / 8; + auto order = sharedLayout.getOrder(); + + // the original register_lds2, but discard the prefetch logic. + auto ld2 = [](ValueTable &vals, int mn, int k, Value val) { + vals[{mn, k}] = val; + }; + + // (a, b) is the coordinate. 
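For reference, a standalone sketch of how the load lambda defined below scatters the four loadX4 results into the ValueTable keyed by that (a, b) coordinate. For operand B the middle two results are swapped, mirroring the swapped mat0/mat1 arguments passed to loadX4 when kOrder == 0.

#include <cstdio>
#include <map>
#include <string>
#include <utility>

int main() {
  std::map<std::pair<int, int>, std::string> vals;
  int a = 0, b = 0;                                // coordinate passed to load(a, b)
  const char *r[4] = {"ha0", "ha1", "ha2", "ha3"}; // the four loadX4 results
  bool isA = true;                                 // set false for the operand-B layout

  if (isA) {
    vals[{a, b}] = r[0];     vals[{a + 1, b}] = r[1];
    vals[{a, b + 1}] = r[2]; vals[{a + 1, b + 1}] = r[3];
  } else {
    vals[{a, b}] = r[0];     vals[{a + 1, b}] = r[2];
    vals[{a, b + 1}] = r[1]; vals[{a + 1, b + 1}] = r[3];
  }
  for (auto &kv : vals)
    std::printf("vals[{%d,%d}] = %s\n", kv.first.first, kv.first.second,
                kv.second.c_str());
  return 0;
}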
+ auto load = [=, &vals, &ld2](int a, int b) { + MMA16816SmemLoader loader( + wpt, sharedLayout.getOrder(), kOrder, smemObj.strides, + tensorTy.getShape() /*tileShape*/, instrShape, matShape, perPhase, + maxPhase, elemBytes, rewriter, typeConverter, loc); + Value cSwizzleOffset = smemObj.getCSwizzleOffset(order[0]); + SmallVector offs = + loader.computeOffsets(warpId, lane, cSwizzleOffset); + const int numPtrs = loader.getNumPtrs(); + SmallVector ptrs(numPtrs); + + Value smemBase = smemObj.getBaseBeforeSwizzle(order[0], loc, rewriter); + + Type smemPtrTy = helper.getShemPtrTy(); + for (int i = 0; i < numPtrs; ++i) { + ptrs[i] = + bitcast(gep(smemPtrTy, smemBase, ValueRange({offs[i]})), smemPtrTy); + } + + auto [ha0, ha1, ha2, ha3] = loader.loadX4( + (kOrder == 1) ? a : b /*mat0*/, (kOrder == 1) ? b : a /*mat1*/, offs, + ptrs, helper.getMatType(), helper.getShemPtrTy()); + + if (isA) { + ld2(vals, a, b, ha0); + ld2(vals, a + 1, b, ha1); + ld2(vals, a, b + 1, ha2); + ld2(vals, a + 1, b + 1, ha3); + } else { + ld2(vals, a, b, ha0); + ld2(vals, a + 1, b, ha2); + ld2(vals, a, b + 1, ha1); + ld2(vals, a + 1, b + 1, ha3); + } + }; + + return load; + } + + // Compose a map of Values to a LLVM::Struct. + // The layout is a list of Value with coordinate of (i,j), the order is as + // the follows: + // [ + // (0,0), (0,1), (1,0), (1,1), # i=0, j=0 + // (0,2), (0,3), (1,2), (1,3), # i=0, j=1 + // (0,4), (0,5), (1,4), (1,5), # i=0, j=2 + // ... + // (2,0), (2,1), (3,0), (3,1), # i=1, j=0 + // (2,2), (2,3), (3,2), (3,3), # i=1, j=1 + // (2,4), (2,5), (3,4), (3,5), # i=1, j=2 + // ... + // ] + // i \in [0, n0) and j \in [0, n1) + // There should be \param n0 * \param n1 elements in the output Struct. + Value composeValuesToDotOperandLayoutStruct(const ValueTable &vals, int n0, + int n1) const { + std::vector elems; + for (int m = 0; m < n0; ++m) + for (int k = 0; k < n1; ++k) { + elems.push_back(vals.at({2 * m, 2 * k})); + elems.push_back(vals.at({2 * m, 2 * k + 1})); + elems.push_back(vals.at({2 * m + 1, 2 * k})); + elems.push_back(vals.at({2 * m + 1, 2 * k + 1})); + } + + assert(!elems.empty()); + + Type elemTy = elems[0].getType(); + Type structTy = LLVM::LLVMStructType::getLiteral( + ctx, SmallVector(elems.size(), elemTy)); + auto result = getStructFromElements(loc, elems, rewriter, structTy); + return result; + } + + ValueTable getValuesFromDotOperandLayoutStruct(Value value, int n0, + int n1) const { + auto elems = getElementsFromStruct(loc, value, rewriter); + + int offset{}; + ValueTable vals; + for (int i = 0; i < n0; ++i) { + for (int j = 0; j < n1; j++) { + vals[{2 * i, 2 * j}] = elems[offset++]; + vals[{2 * i, 2 * j + 1}] = elems[offset++]; + vals[{2 * i + 1, 2 * j}] = elems[offset++]; + vals[{2 * i + 1, 2 * j + 1}] = elems[offset++]; + } + } + return vals; + } +}; + +// Helper for conversion of FMA DotOp. 
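Before the FMA helper declared next, a standalone sketch of the flattening convention documented above: coordinate (2*i + di, 2*j + dj) lands in struct slot (i*n1 + j)*4 + di*2 + dj, so composeValuesToDotOperandLayoutStruct and getValuesFromDotOperandLayoutStruct are exact inverses and the unpacking side can simply read the struct sequentially. n0 and n1 are arbitrary here.

#include <cassert>
#include <cstdio>

int main() {
  const int n0 = 2, n1 = 3; // assumed repetition counts
  int flat = 0;
  for (int i = 0; i < n0; ++i)
    for (int j = 0; j < n1; ++j)
      for (int di = 0; di < 2; ++di)
        for (int dj = 0; dj < 2; ++dj) {
          // Emission order in the patch per (i, j) group:
          // (0,0), (0,1), (1,0), (1,1)
          int expected = (i * n1 + j) * 4 + di * 2 + dj;
          assert(flat == expected);
          std::printf("vals[{%d,%d}] <-> struct[%d]\n",
                      2 * i + di, 2 * j + dj, flat);
          ++flat;
        }
  return 0;
}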
+struct DotOpFMAConversionHelper { + Attribute layout; + MLIRContext *ctx{}; + + using ValueTable = std::map, Value>; + + explicit DotOpFMAConversionHelper(Attribute layout) + : layout(layout), ctx(layout.getContext()) {} + + SmallVector + getThreadIds(Value threadId, ArrayRef shapePerCTA, + ArrayRef sizePerThread, ArrayRef order, + ConversionPatternRewriter &rewriter, Location loc) const { + int dim = order.size(); + SmallVector threadIds(dim); + for (unsigned k = 0; k < dim - 1; k++) { + Value dimK = i32_val(shapePerCTA[order[k]] / sizePerThread[order[k]]); + Value rem = urem(threadId, dimK); + threadId = udiv(threadId, dimK); + threadIds[order[k]] = rem; + } + Value dimK = i32_val(shapePerCTA[order[dim - 1]]); + threadIds[order[dim - 1]] = urem(threadId, dimK); + return threadIds; + } + + Value loadA(Value A, Value llA, BlockedEncodingAttr dLayout, Value thread, + Location loc, ConversionPatternRewriter &rewriter) const { + auto aTensorTy = A.getType().cast(); + auto aLayout = aTensorTy.getEncoding().cast(); + auto aShape = aTensorTy.getShape(); + + auto aOrder = aLayout.getOrder(); + auto order = dLayout.getOrder(); + + bool isARow = aOrder[0] == 1; + + auto aSmem = getSharedMemoryObjectFromStruct(loc, llA, rewriter); + Value strideAM = aSmem.strides[0]; + Value strideAK = aSmem.strides[1]; + Value strideA0 = isARow ? strideAK : strideAM; + Value strideA1 = isARow ? strideAM : strideAK; + int aNumPtr = 8; + int K = aShape[1]; + int M = aShape[0]; + + auto shapePerCTA = getShapePerCTA(dLayout); + auto sizePerThread = getSizePerThread(dLayout); + + Value _0 = i32_val(0); + + Value mContig = i32_val(sizePerThread[order[1]]); + + // threadId in blocked layout + auto threadIds = + getThreadIds(thread, shapePerCTA, sizePerThread, order, rewriter, loc); + Value threadIdM = threadIds[0]; + + Value offA0 = isARow ? _0 : mul(threadIdM, mContig); + Value offA1 = isARow ? mul(threadIdM, mContig) : _0; + SmallVector aOff(aNumPtr); + for (int i = 0; i < aNumPtr; ++i) { + aOff[i] = add(mul(offA0, strideA0), mul(offA1, strideA1)); + } + auto elemTy = A.getType().cast().getElementType(); + + Type ptrTy = ptr_ty(elemTy); + SmallVector aPtrs(aNumPtr); + for (int i = 0; i < aNumPtr; ++i) + aPtrs[i] = gep(ptrTy, aSmem.base, aOff[i]); + + SmallVector vas; + + int mShapePerCTA = getShapePerCTAForMN(dLayout, true /*isM*/); + int mSizePerThread = getSizePerThreadForMN(dLayout, true /*isM*/); + + for (unsigned k = 0; k < K; ++k) + for (unsigned m = 0; m < M; m += mShapePerCTA) + for (unsigned mm = 0; mm < mSizePerThread; ++mm) { + Value offset = + add(mul(i32_val(m + mm), strideAM), mul(i32_val(k), strideAK)); + Value pa = gep(ptrTy, aPtrs[0], offset); + Value va = load(pa); + vas.emplace_back(va); + } + + return getStructFromValueTable(vas, rewriter, loc, elemTy); + } + + Value loadB(Value B, Value llB, BlockedEncodingAttr dLayout, Value thread, + Location loc, ConversionPatternRewriter &rewriter) const { + auto bTensorTy = B.getType().cast(); + auto bLayout = bTensorTy.getEncoding().cast(); + auto bShape = bTensorTy.getShape(); + + auto bOrder = bLayout.getOrder(); + auto order = dLayout.getOrder(); + + bool isBRow = bOrder[0] == 1; + + auto bSmem = getSharedMemoryObjectFromStruct(loc, llB, rewriter); + Value strideBN = bSmem.strides[1]; + Value strideBK = bSmem.strides[0]; + Value strideB0 = isBRow ? strideBN : strideBK; + Value strideB1 = isBRow ? 
strideBK : strideBN; + int bNumPtr = 8; + int K = bShape[0]; + int N = bShape[1]; + + auto shapePerCTA = getShapePerCTA(dLayout); + auto sizePerThread = getSizePerThread(dLayout); + + Value _0 = i32_val(0); + + Value nContig = i32_val(sizePerThread[order[0]]); + + // threadId in blocked layout + auto threadIds = + getThreadIds(thread, shapePerCTA, sizePerThread, order, rewriter, loc); + Value threadIdN = threadIds[1]; + + Value offB0 = isBRow ? mul(threadIdN, nContig) : _0; + Value offB1 = isBRow ? _0 : mul(threadIdN, nContig); + SmallVector bOff(bNumPtr); + for (int i = 0; i < bNumPtr; ++i) { + bOff[i] = add(mul(offB0, strideB0), mul(offB1, strideB1)); + } + auto elemTy = B.getType().cast().getElementType(); + + Type ptrTy = ptr_ty(elemTy); + SmallVector bPtrs(bNumPtr); + for (int i = 0; i < bNumPtr; ++i) + bPtrs[i] = gep(ptrTy, bSmem.base, bOff[i]); + + SmallVector vbs; + + int nShapePerCTA = getShapePerCTAForMN(dLayout, false /*isM*/); + int nSizePerThread = getSizePerThreadForMN(dLayout, false /*isM*/); + + for (unsigned k = 0; k < K; ++k) + for (unsigned n = 0; n < N; n += nShapePerCTA) + for (unsigned nn = 0; nn < nSizePerThread; ++nn) { + Value offset = + add(mul(i32_val(n + nn), strideBN), mul(i32_val(k), strideBK)); + Value pb = gep(ptrTy, bPtrs[0], offset); + Value vb = load(pb); + vbs.emplace_back(vb); + } + + return getStructFromValueTable(vbs, rewriter, loc, elemTy); + } + + ValueTable getValueTableFromStruct(Value val, int K, int n0, int shapePerCTA, + int sizePerThread, + ConversionPatternRewriter &rewriter, + Location loc) const { + ValueTable res; + auto elems = getElementsFromStruct(loc, val, rewriter); + int index = 0; + for (unsigned k = 0; k < K; ++k) { + for (unsigned m = 0; m < n0; m += shapePerCTA) + for (unsigned mm = 0; mm < sizePerThread; ++mm) { + res[{m + mm, k}] = elems[index++]; + } + } + return res; + } + + Value getStructFromValueTable(ArrayRef vals, + ConversionPatternRewriter &rewriter, + Location loc, Type elemTy) const { + SmallVector elemTypes(vals.size(), elemTy); + SmallVector elems; + elems.reserve(vals.size()); + for (auto &val : vals) { + elems.push_back(val); + } + + Type structTy = struct_ty(elemTypes); + return getStructFromElements(loc, elems, rewriter, structTy); + } + + // get number of elements per thread for $a or $b. + static int getNumElemsPerThread(ArrayRef shape, + DotOperandEncodingAttr dotOpLayout) { + auto blockedLayout = dotOpLayout.getParent().cast(); + auto shapePerCTA = getShapePerCTA(blockedLayout); + auto sizePerThread = getSizePerThread(blockedLayout); + + // TODO[Superjomn]: we assume the k aixs is fixed for $a and $b here, fix it + // if not. + int K = dotOpLayout.getOpIdx() == 0 ? shape[1] : shape[0]; + int otherDim = dotOpLayout.getOpIdx() == 1 ? shape[1] : shape[0]; + + bool isM = dotOpLayout.getOpIdx() == 0; + int shapePerCTAMN = getShapePerCTAForMN(blockedLayout, isM); + int sizePerThreadMN = getSizePerThreadForMN(blockedLayout, isM); + return K * std::max(otherDim / shapePerCTAMN, 1) * sizePerThreadMN; + } + + // Get shapePerCTA for M or N axis. + static int getShapePerCTAForMN(BlockedEncodingAttr layout, bool isM) { + auto order = layout.getOrder(); + auto shapePerCTA = getShapePerCTA(layout); + + int mShapePerCTA = + order[0] == 1 ? shapePerCTA[order[1]] : shapePerCTA[order[0]]; + int nShapePerCTA = + order[0] == 0 ? shapePerCTA[order[1]] : shapePerCTA[order[0]]; + return isM ? mShapePerCTA : nShapePerCTA; + } + + // Get sizePerThread for M or N axis. 
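Before the sizePerThread helper that follows, a standalone worked example of the per-thread element count computed by getNumElemsPerThread above for the FMA path. The blocked-layout parameters are assumptions chosen only to make the arithmetic concrete.

#include <algorithm>
#include <cstdio>

int main() {
  // Assumed $a operand of shape M x K whose parent layout is blocked.
  const int M = 64, K = 32;
  const int shapePerCTAM = 32;  // CTA tile along M (assumed)
  const int sizePerThreadM = 2; // elements per thread along M (assumed)

  int elems = K * std::max(M / shapePerCTAM, 1) * sizePerThreadM;
  std::printf("each thread keeps %d elements of $a in registers\n", elems);
  return 0;
}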
+ static int getSizePerThreadForMN(BlockedEncodingAttr layout, bool isM) { + auto order = layout.getOrder(); + auto sizePerThread = getSizePerThread(layout); + + int mSizePerThread = + order[0] == 1 ? sizePerThread[order[1]] : sizePerThread[order[0]]; + int nSizePerThread = + order[0] == 0 ? sizePerThread[order[1]] : sizePerThread[order[0]]; + return isM ? mSizePerThread : nSizePerThread; + } +}; + +} // namespace LLVM +} // namespace mlir + +#endif diff --git a/lib/Conversion/TritonGPUToLLVM/DotOpToLLVM.cpp b/lib/Conversion/TritonGPUToLLVM/DotOpToLLVM.cpp new file mode 100644 index 000000000000..d79093261886 --- /dev/null +++ b/lib/Conversion/TritonGPUToLLVM/DotOpToLLVM.cpp @@ -0,0 +1,311 @@ +#include "DotOpToLLVM.h" +#include "DotOpHelpers.h" +#include "Utility.h" + +using namespace mlir; +using namespace mlir::triton; + +using ::mlir::LLVM::DotOpFMAConversionHelper; +using ::mlir::LLVM::DotOpMmaV1ConversionHelper; +using ::mlir::LLVM::getElementsFromStruct; +using ::mlir::LLVM::getStructFromElements; +using ::mlir::LLVM::MMA16816ConversionHelper; +using ::mlir::triton::gpu::DotOperandEncodingAttr; +using ::mlir::triton::gpu::MmaEncodingAttr; + +struct DotOpConversion : public ConvertTritonGPUOpToLLVMPattern { + using ConvertTritonGPUOpToLLVMPattern< + triton::DotOp>::ConvertTritonGPUOpToLLVMPattern; + + LogicalResult + matchAndRewrite(triton::DotOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + // D = A * B + C + Value A = op.a(); + Value D = op.getResult(); + + // Here we assume the DotOp's operands always comes from shared memory. + auto AShape = A.getType().cast().getShape(); + size_t reduceAxis = 1; + unsigned K = AShape[reduceAxis]; + bool isOuter = K == 1; + + MmaEncodingAttr mmaLayout = D.getType() + .cast() + .getEncoding() + .dyn_cast(); + if (!isOuter && mmaLayout && supportMMA(op, mmaLayout.getVersionMajor())) { + if (mmaLayout.isVolta()) + return convertMMA884(op, adaptor, rewriter); + if (mmaLayout.isAmpere()) + return convertMMA16816(op, adaptor, rewriter); + + llvm::report_fatal_error( + "Unsupported MMA kind found when converting DotOp to LLVM."); + } + + if (D.getType() + .cast() + .getEncoding() + .isa()) + return convertFMADot(op, adaptor, rewriter); + + llvm::report_fatal_error( + "Unsupported DotOp found when converting TritonGPU to LLVM."); + } + +private: + // Convert to mma.m16n8k16 + LogicalResult convertMMA16816(triton::DotOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const { + auto loc = op.getLoc(); + auto mmaLayout = op.getResult() + .getType() + .cast() + .getEncoding() + .cast(); + + Value A = op.a(); + Value B = op.b(); + Value C = op.c(); + + MMA16816ConversionHelper mmaHelper(A.getType(), mmaLayout, + getThreadId(rewriter, loc), rewriter, + getTypeConverter(), loc); + + auto ATensorTy = A.getType().cast(); + auto BTensorTy = B.getType().cast(); + + assert(ATensorTy.getEncoding().isa() && + BTensorTy.getEncoding().isa() && + "Both $a and %b should be DotOperand layout."); + + Value loadedA, loadedB, loadedC; + loadedA = adaptor.a(); + loadedB = adaptor.b(); + loadedC = mmaHelper.loadC(op.c(), adaptor.c()); + + return mmaHelper.convertDot(A, B, C, op.d(), loadedA, loadedB, loadedC, op, + adaptor); + } + /// Convert to mma.m8n8k4 + LogicalResult convertMMA884(triton::DotOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const { + auto *ctx = op.getContext(); + auto loc = op.getLoc(); + + Value A = op.a(); + Value B = op.b(); + Value D = op.getResult(); + auto mmaLayout = D.getType() + 
.cast() + .getEncoding() + .cast(); + auto ALayout = A.getType() + .cast() + .getEncoding() + .cast(); + auto BLayout = B.getType() + .cast() + .getEncoding() + .cast(); + + auto ATensorTy = A.getType().cast(); + auto BTensorTy = B.getType().cast(); + auto DTensorTy = D.getType().cast(); + auto AShape = ATensorTy.getShape(); + auto BShape = BTensorTy.getShape(); + auto DShape = DTensorTy.getShape(); + auto wpt = mmaLayout.getWarpsPerCTA(); + + bool isARow = ALayout.getIsMMAv1Row().cast().getValue(); + bool isBRow = BLayout.getIsMMAv1Row().cast().getValue(); + + DotOpMmaV1ConversionHelper helper(mmaLayout); + + unsigned numM = helper.getNumM(AShape, isARow); + unsigned numN = helper.getNumN(BShape, isBRow); + unsigned NK = AShape[1]; + + auto has = helper.extractLoadedOperand(adaptor.a(), NK, rewriter); + auto hbs = helper.extractLoadedOperand(adaptor.b(), NK, rewriter); + + // Initialize accumulators with external values, the acc holds the + // accumulator value that is shared between the MMA instructions inside a + // DotOp, we can call the order of the values the accumulator-internal + // order. + SmallVector acc = getElementsFromStruct(loc, adaptor.c(), rewriter); + size_t resSize = acc.size(); + + // The resVals holds the final result of the DotOp. + // NOTE The current order of resVals is different from acc, we call it the + // accumulator-external order. and + SmallVector resVals(resSize); + + auto getIdx = [&](int m, int n) { + std::vector idx{{ + (m * 2 + 0) + (n * 4 + 0) * numM, // row0 + (m * 2 + 0) + (n * 4 + 1) * numM, + (m * 2 + 1) + (n * 4 + 0) * numM, // row1 + (m * 2 + 1) + (n * 4 + 1) * numM, + (m * 2 + 0) + (n * 4 + 2) * numM, // row2 + (m * 2 + 0) + (n * 4 + 3) * numM, + (m * 2 + 1) + (n * 4 + 2) * numM, // row3 + (m * 2 + 1) + (n * 4 + 3) * numM, + }}; + return idx; + }; + + { // convert the acc's value from accumuator-external order to + // accumulator-internal order. + SmallVector accInit(acc.size()); + + for (unsigned m = 0; m < numM / 2; ++m) + for (unsigned n = 0; n < numN / 2; ++n) { + auto idx = getIdx(m, n); + for (unsigned i = 0; i < 8; ++i) + accInit[idx[i]] = acc[(m * numN / 2 + n) * 8 + i]; + } + + acc = accInit; + } + + auto callMMA = [&](unsigned m, unsigned n, unsigned k) { + auto ha = has.at({m, k}); + auto hb = hbs.at({n, k}); + + PTXBuilder builder; + auto idx = getIdx(m, n); + + auto *resOprs = builder.newListOperand(8, "=f"); + auto *AOprs = builder.newListOperand({ + {ha.first, "r"}, + {ha.second, "r"}, + }); + + auto *BOprs = builder.newListOperand({ + {hb.first, "r"}, + {hb.second, "r"}, + }); + auto *COprs = builder.newListOperand(); + for (int i = 0; i < 8; ++i) + COprs->listAppend(builder.newOperand(acc[idx[i]], std::to_string(i))); + + auto mma = builder.create("mma.sync.aligned.m8n8k4") + ->o(isARow ? "row" : "col") + .o(isBRow ? 
"row" : "col") + .o("f32.f16.f16.f32"); + + mma(resOprs, AOprs, BOprs, COprs); + + Value res = + builder.launch(rewriter, loc, helper.getMmaRetType(ATensorTy)); + + auto getIntAttr = [&](int v) { + return ArrayAttr::get(ctx, {IntegerAttr::get(i32_ty, v)}); + }; + + for (unsigned i = 0; i < 8; i++) { + Value elem = extract_val(f32_ty, res, getIntAttr(i)); + acc[idx[i]] = elem; + resVals[(m * numN / 2 + n) * 8 + i] = elem; + } + }; + + for (unsigned k = 0; k < NK; k += 4) + for (unsigned m = 0; m < numM / 2; ++m) + for (unsigned n = 0; n < numN / 2; ++n) { + callMMA(m, n, k); + } + + Type structTy = LLVM::LLVMStructType::getLiteral( + ctx, SmallVector(resSize, type::f32Ty(ctx))); + Value res = getStructFromElements(loc, resVals, rewriter, structTy); + rewriter.replaceOp(op, res); + return success(); + } + + LogicalResult convertFMADot(triton::DotOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const { + auto *ctx = rewriter.getContext(); + auto loc = op.getLoc(); + auto threadId = getThreadId(rewriter, loc); + + auto A = op.a(); + auto B = op.b(); + auto C = op.c(); + auto D = op.getResult(); + + auto aTensorTy = A.getType().cast(); + auto bTensorTy = B.getType().cast(); + auto cTensorTy = C.getType().cast(); + auto dTensorTy = D.getType().cast(); + + auto aShape = aTensorTy.getShape(); + auto bShape = bTensorTy.getShape(); + auto cShape = cTensorTy.getShape(); + + BlockedEncodingAttr dLayout = + dTensorTy.getEncoding().cast(); + auto order = dLayout.getOrder(); + auto cc = getElementsFromStruct(loc, adaptor.c(), rewriter); + + DotOpFMAConversionHelper helper(dLayout); + Value llA = adaptor.a(); + Value llB = adaptor.b(); + + auto sizePerThread = getSizePerThread(dLayout); + auto shapePerCTA = getShapePerCTA(dLayout); + + int K = aShape[1]; + int M = aShape[0]; + int N = bShape[1]; + + int mShapePerCTA = + order[0] == 1 ? shapePerCTA[order[1]] : shapePerCTA[order[0]]; + int mSizePerThread = + order[0] == 1 ? sizePerThread[order[1]] : sizePerThread[order[0]]; + int nShapePerCTA = + order[0] == 0 ? shapePerCTA[order[1]] : shapePerCTA[order[0]]; + int nSizePerThread = + order[0] == 0 ? sizePerThread[order[1]] : sizePerThread[order[0]]; + + auto has = helper.getValueTableFromStruct(llA, K, M, mShapePerCTA, + mSizePerThread, rewriter, loc); + auto hbs = helper.getValueTableFromStruct(llB, K, N, nShapePerCTA, + nSizePerThread, rewriter, loc); + + SmallVector ret = cc; + bool isCRow = order[0] == 1; + + for (unsigned k = 0; k < K; k++) { + for (unsigned m = 0; m < M; m += mShapePerCTA) + for (unsigned n = 0; n < N; n += nShapePerCTA) + for (unsigned mm = 0; mm < mSizePerThread; ++mm) + for (unsigned nn = 0; nn < nSizePerThread; ++nn) { + int mIdx = m / mShapePerCTA * mSizePerThread + mm; + int nIdx = n / nShapePerCTA * nSizePerThread + nn; + + int z = isCRow ? 
mIdx * N / nShapePerCTA * mSizePerThread + nIdx + : nIdx * M / mShapePerCTA * nSizePerThread + mIdx; + ret[z] = rewriter.create( + loc, has[{m + mm, k}], hbs[{n + nn, k}], ret[z]); + } + } + + auto res = getStructFromElements( + loc, ret, rewriter, + struct_ty(SmallVector(ret.size(), ret[0].getType()))); + rewriter.replaceOp(op, res); + + return success(); + } +}; + +void populateDotOpToLLVMPatterns(mlir::LLVMTypeConverter &typeConverter, + RewritePatternSet &patterns, int numWarps, + AxisInfoAnalysis &axisInfoAnalysis, + const Allocation *allocation, Value smem, + PatternBenefit benefit) { + patterns.add(typeConverter, allocation, smem, benefit); +} diff --git a/lib/Conversion/TritonGPUToLLVM/DotOpToLLVM.h b/lib/Conversion/TritonGPUToLLVM/DotOpToLLVM.h new file mode 100644 index 000000000000..716147e46918 --- /dev/null +++ b/lib/Conversion/TritonGPUToLLVM/DotOpToLLVM.h @@ -0,0 +1,15 @@ +#ifndef TRITON_CONVERSION_TRITONGPU_TO_LLVM_DOT_OP_H +#define TRITON_CONVERSION_TRITONGPU_TO_LLVM_DOT_OP_H + +#include "TritonGPUToLLVMBase.h" + +using namespace mlir; +using namespace mlir::triton; + +void populateDotOpToLLVMPatterns(mlir::LLVMTypeConverter &typeConverter, + RewritePatternSet &patterns, int numWarps, + AxisInfoAnalysis &axisInfoAnalysis, + const Allocation *allocation, Value smem, + PatternBenefit benefit); + +#endif diff --git a/lib/Conversion/TritonGPUToLLVM/ElementwiseOpToLLVM.cpp b/lib/Conversion/TritonGPUToLLVM/ElementwiseOpToLLVM.cpp new file mode 100644 index 000000000000..aa2fab031035 --- /dev/null +++ b/lib/Conversion/TritonGPUToLLVM/ElementwiseOpToLLVM.cpp @@ -0,0 +1,865 @@ +#include "ElementwiseOpToLLVM.h" + +using namespace mlir; +using namespace mlir::triton; + +using ::mlir::LLVM::getElementsFromStruct; +using ::mlir::LLVM::getStructFromElements; +using ::mlir::triton::gpu::getElemsPerThread; + +struct FpToFpOpConversion + : public ConvertTritonGPUOpToLLVMPattern { + using ConvertTritonGPUOpToLLVMPattern< + triton::FpToFpOp>::ConvertTritonGPUOpToLLVMPattern; + + static SmallVector + convertFp8x4ToFp16x4(Location loc, ConversionPatternRewriter &rewriter, + const Value &v0, const Value &v1, const Value &v2, + const Value &v3) { + auto ctx = rewriter.getContext(); + auto fp8x4VecTy = vec_ty(i8_ty, 4); + Value fp8x4Vec = undef(fp8x4VecTy); + fp8x4Vec = insert_element(fp8x4VecTy, fp8x4Vec, v0, i32_val(0)); + fp8x4Vec = insert_element(fp8x4VecTy, fp8x4Vec, v1, i32_val(1)); + fp8x4Vec = insert_element(fp8x4VecTy, fp8x4Vec, v2, i32_val(2)); + fp8x4Vec = insert_element(fp8x4VecTy, fp8x4Vec, v3, i32_val(3)); + fp8x4Vec = bitcast(fp8x4Vec, i32_ty); + + PTXBuilder builder; + auto *ptxAsm = "{ \n" + ".reg .b32 a<2>, b<2>; \n" + "prmt.b32 a0, 0, $2, 0x5040; \n" + "prmt.b32 a1, 0, $2, 0x7060; \n" + "lop3.b32 b0, a0, 0x7fff7fff, 0, 0xc0; \n" + "lop3.b32 b1, a1, 0x7fff7fff, 0, 0xc0; \n" + "shr.b32 b0, b0, 1; \n" + "shr.b32 b1, b1, 1; \n" + "lop3.b32 $0, b0, 0x80008000, a0, 0xf8; \n" + "lop3.b32 $1, b1, 0x80008000, a1, 0xf8; \n" + "}"; + auto &call = *builder.create(ptxAsm); + + auto *o0 = builder.newOperand("=r"); + auto *o1 = builder.newOperand("=r"); + auto *i = builder.newOperand(fp8x4Vec, "r"); + call({o0, o1, i}, /*onlyAttachMLIRArgs=*/true); + + auto fp16x2VecTy = vec_ty(f16_ty, 2); + auto fp16x2x2StructTy = + struct_ty(SmallVector{fp16x2VecTy, fp16x2VecTy}); + auto fp16x2x2Struct = + builder.launch(rewriter, loc, fp16x2x2StructTy, false); + auto fp16x2Vec0 = + extract_val(fp16x2VecTy, fp16x2x2Struct, rewriter.getI32ArrayAttr({0})); + auto fp16x2Vec1 = + extract_val(fp16x2VecTy, 
fp16x2x2Struct, rewriter.getI32ArrayAttr({1})); + return {extract_element(f16_ty, fp16x2Vec0, i32_val(0)), + extract_element(f16_ty, fp16x2Vec0, i32_val(1)), + extract_element(f16_ty, fp16x2Vec1, i32_val(0)), + extract_element(f16_ty, fp16x2Vec1, i32_val(1))}; + } + + static SmallVector + convertFp16x4ToFp8x4(Location loc, ConversionPatternRewriter &rewriter, + const Value &v0, const Value &v1, const Value &v2, + const Value &v3) { + auto ctx = rewriter.getContext(); + auto fp16x2VecTy = vec_ty(f16_ty, 2); + Value fp16x2Vec0 = undef(fp16x2VecTy); + Value fp16x2Vec1 = undef(fp16x2VecTy); + fp16x2Vec0 = insert_element(fp16x2VecTy, fp16x2Vec0, v0, i32_val(0)); + fp16x2Vec0 = insert_element(fp16x2VecTy, fp16x2Vec0, v1, i32_val(1)); + fp16x2Vec1 = insert_element(fp16x2VecTy, fp16x2Vec1, v2, i32_val(0)); + fp16x2Vec1 = insert_element(fp16x2VecTy, fp16x2Vec1, v3, i32_val(1)); + fp16x2Vec0 = bitcast(fp16x2Vec0, i32_ty); + fp16x2Vec1 = bitcast(fp16x2Vec1, i32_ty); + + PTXBuilder builder; + auto *ptxAsm = "{ \n" + ".reg .b32 a<2>, b<2>; \n" + "shl.b32 a0, $1, 1; \n" + "shl.b32 a1, $2, 1; \n" + "lop3.b32 a0, a0, 0x7fff7fff, 0, 0xc0; \n" + "lop3.b32 a1, a1, 0x7fff7fff, 0, 0xc0; \n" + "add.u32 a0, a0, 0x00800080; \n" + "add.u32 a1, a1, 0x00800080; \n" + "lop3.b32 b0, $1, 0x80008000, a0, 0xea; \n" + "lop3.b32 b1, $2, 0x80008000, a1, 0xea; \n" + "prmt.b32 $0, b0, b1, 0x7531; \n" + "}"; + auto &call = *builder.create(ptxAsm); + + auto *o = builder.newOperand("=r"); + auto *i0 = builder.newOperand(fp16x2Vec0, "r"); + auto *i1 = builder.newOperand(fp16x2Vec1, "r"); + call({o, i0, i1}, /*onlyAttachMLIRArgs=*/true); + + auto fp8x4VecTy = vec_ty(i8_ty, 4); + auto fp8x4Vec = builder.launch(rewriter, loc, fp8x4VecTy, false); + return {extract_element(i8_ty, fp8x4Vec, i32_val(0)), + extract_element(i8_ty, fp8x4Vec, i32_val(1)), + extract_element(i8_ty, fp8x4Vec, i32_val(2)), + extract_element(i8_ty, fp8x4Vec, i32_val(3))}; + } + + static SmallVector + convertFp8x4ToBf16x4(Location loc, ConversionPatternRewriter &rewriter, + const Value &v0, const Value &v1, const Value &v2, + const Value &v3) { + auto ctx = rewriter.getContext(); + auto fp8x4VecTy = vec_ty(i8_ty, 4); + Value fp8x4Vec = undef(fp8x4VecTy); + fp8x4Vec = insert_element(fp8x4VecTy, fp8x4Vec, v0, i32_val(0)); + fp8x4Vec = insert_element(fp8x4VecTy, fp8x4Vec, v1, i32_val(1)); + fp8x4Vec = insert_element(fp8x4VecTy, fp8x4Vec, v2, i32_val(2)); + fp8x4Vec = insert_element(fp8x4VecTy, fp8x4Vec, v3, i32_val(3)); + fp8x4Vec = bitcast(fp8x4Vec, i32_ty); + + PTXBuilder builder; + auto *ptxAsm = "{ \n" + ".reg .b32 a<2>, sign<2>, nosign<2>, b<2>; \n" + "prmt.b32 a0, 0, $2, 0x5040; \n" + "prmt.b32 a1, 0, $2, 0x7060; \n" + "and.b32 sign0, a0, 0x80008000; \n" + "and.b32 sign1, a1, 0x80008000; \n" + "and.b32 nosign0, a0, 0x7fff7fff; \n" + "and.b32 nosign1, a1, 0x7fff7fff; \n" + "shr.b32 nosign0, nosign0, 4; \n" + "shr.b32 nosign1, nosign1, 4; \n" + "add.u32 nosign0, nosign0, 0x38003800; \n" + "add.u32 nosign1, nosign1, 0x38003800; \n" + "or.b32 $0, sign0, nosign0; \n" + "or.b32 $1, sign1, nosign1; \n" + "}"; + auto &call = *builder.create(ptxAsm); + + auto *o0 = builder.newOperand("=r"); + auto *o1 = builder.newOperand("=r"); + auto *i = builder.newOperand(fp8x4Vec, "r"); + call({o0, o1, i}, /* onlyAttachMLIRArgs */ true); + + auto bf16x2VecTy = vec_ty(i16_ty, 2); + auto bf16x2x2StructTy = + struct_ty(SmallVector{bf16x2VecTy, bf16x2VecTy}); + auto bf16x2x2Struct = + builder.launch(rewriter, loc, bf16x2x2StructTy, false); + auto bf16x2Vec0 = + 
extract_val(bf16x2VecTy, bf16x2x2Struct, rewriter.getI32ArrayAttr({0})); + auto bf16x2Vec1 = + extract_val(bf16x2VecTy, bf16x2x2Struct, rewriter.getI32ArrayAttr({1})); + return {extract_element(i16_ty, bf16x2Vec0, i32_val(0)), + extract_element(i16_ty, bf16x2Vec0, i32_val(1)), + extract_element(i16_ty, bf16x2Vec1, i32_val(0)), + extract_element(i16_ty, bf16x2Vec1, i32_val(1))}; + } + + static SmallVector + convertBf16x4ToFp8x4(Location loc, ConversionPatternRewriter &rewriter, + const Value &v0, const Value &v1, const Value &v2, + const Value &v3) { + auto ctx = rewriter.getContext(); + auto bf16x2VecTy = vec_ty(i16_ty, 2); + Value bf16x2Vec0 = undef(bf16x2VecTy); + Value bf16x2Vec1 = undef(bf16x2VecTy); + bf16x2Vec0 = insert_element(bf16x2VecTy, bf16x2Vec0, v0, i32_val(0)); + bf16x2Vec0 = insert_element(bf16x2VecTy, bf16x2Vec0, v1, i32_val(1)); + bf16x2Vec1 = insert_element(bf16x2VecTy, bf16x2Vec1, v2, i32_val(0)); + bf16x2Vec1 = insert_element(bf16x2VecTy, bf16x2Vec1, v3, i32_val(1)); + bf16x2Vec0 = bitcast(bf16x2Vec0, i32_ty); + bf16x2Vec1 = bitcast(bf16x2Vec1, i32_ty); + + PTXBuilder builder; + auto *ptxAsm = "{ \n" + ".reg .u32 sign, sign<2>, nosign, nosign<2>; \n" + ".reg .u32 fp8_min, fp8_max, rn_, zero; \n" + "mov.u32 fp8_min, 0x38003800; \n" + "mov.u32 fp8_max, 0x3ff03ff0; \n" + "mov.u32 rn_, 0x80008; \n" + "mov.u32 zero, 0; \n" + "and.b32 sign0, $1, 0x80008000; \n" + "and.b32 sign1, $2, 0x80008000; \n" + "prmt.b32 sign, sign0, sign1, 0x7531; \n" + "and.b32 nosign0, $1, 0x7fff7fff; \n" + "and.b32 nosign1, $2, 0x7fff7fff; \n" + ".reg .u32 nosign_0_<2>, nosign_1_<2>; \n" + "and.b32 nosign_0_0, nosign0, 0xffff0000; \n" + "max.u32 nosign_0_0, nosign_0_0, 0x38000000; \n" + "min.u32 nosign_0_0, nosign_0_0, 0x3ff00000; \n" + "and.b32 nosign_0_1, nosign0, 0x0000ffff; \n" + "max.u32 nosign_0_1, nosign_0_1, 0x3800; \n" + "min.u32 nosign_0_1, nosign_0_1, 0x3ff0; \n" + "or.b32 nosign0, nosign_0_0, nosign_0_1; \n" + "and.b32 nosign_1_0, nosign1, 0xffff0000; \n" + "max.u32 nosign_1_0, nosign_1_0, 0x38000000; \n" + "min.u32 nosign_1_0, nosign_1_0, 0x3ff00000; \n" + "and.b32 nosign_1_1, nosign1, 0x0000ffff; \n" + "max.u32 nosign_1_1, nosign_1_1, 0x3800; \n" + "min.u32 nosign_1_1, nosign_1_1, 0x3ff0; \n" + "or.b32 nosign1, nosign_1_0, nosign_1_1; \n" + "add.u32 nosign0, nosign0, rn_; \n" + "add.u32 nosign1, nosign1, rn_; \n" + "sub.u32 nosign0, nosign0, 0x38003800; \n" + "sub.u32 nosign1, nosign1, 0x38003800; \n" + "shr.u32 nosign0, nosign0, 4; \n" + "shr.u32 nosign1, nosign1, 4; \n" + "prmt.b32 nosign, nosign0, nosign1, 0x6420; \n" + "or.b32 $0, nosign, sign; \n" + "}"; + auto &call = *builder.create(ptxAsm); + + auto *o = builder.newOperand("=r"); + auto *i0 = builder.newOperand(bf16x2Vec0, "r"); + auto *i1 = builder.newOperand(bf16x2Vec1, "r"); + call({o, i0, i1}, /*onlyAttachMLIRArgs=*/true); + + auto fp8x4VecTy = vec_ty(i8_ty, 4); + auto fp8x4Vec = builder.launch(rewriter, loc, fp8x4VecTy, false); + return {extract_element(i8_ty, fp8x4Vec, i32_val(0)), + extract_element(i8_ty, fp8x4Vec, i32_val(1)), + extract_element(i8_ty, fp8x4Vec, i32_val(2)), + extract_element(i8_ty, fp8x4Vec, i32_val(3))}; + } + + static SmallVector + convertFp8x4ToFp32x4(Location loc, ConversionPatternRewriter &rewriter, + const Value &v0, const Value &v1, const Value &v2, + const Value &v3) { + auto fp16Values = convertFp8x4ToFp16x4(loc, rewriter, v0, v1, v2, v3); + return {rewriter.create(loc, f32_ty, fp16Values[0]), + rewriter.create(loc, f32_ty, fp16Values[1]), + rewriter.create(loc, f32_ty, fp16Values[2]), + 
rewriter.create(loc, f32_ty, fp16Values[3])}; + } + + static SmallVector + convertFp32x4ToFp8x4(Location loc, ConversionPatternRewriter &rewriter, + const Value &v0, const Value &v1, const Value &v2, + const Value &v3) { + auto c0 = rewriter.create(loc, f16_ty, v0); + auto c1 = rewriter.create(loc, f16_ty, v1); + auto c2 = rewriter.create(loc, f16_ty, v2); + auto c3 = rewriter.create(loc, f16_ty, v3); + return convertFp16x4ToFp8x4(loc, rewriter, c0, c1, c2, c3); + } + + static SmallVector + convertFp8x4ToFp64x4(Location loc, ConversionPatternRewriter &rewriter, + const Value &v0, const Value &v1, const Value &v2, + const Value &v3) { + auto fp16Values = convertFp8x4ToFp16x4(loc, rewriter, v0, v1, v2, v3); + return {rewriter.create(loc, f64_ty, fp16Values[0]), + rewriter.create(loc, f64_ty, fp16Values[1]), + rewriter.create(loc, f64_ty, fp16Values[2]), + rewriter.create(loc, f64_ty, fp16Values[3])}; + } + + static SmallVector + convertFp64x4ToFp8x4(Location loc, ConversionPatternRewriter &rewriter, + const Value &v0, const Value &v1, const Value &v2, + const Value &v3) { + auto c0 = rewriter.create(loc, f16_ty, v0); + auto c1 = rewriter.create(loc, f16_ty, v1); + auto c2 = rewriter.create(loc, f16_ty, v2); + auto c3 = rewriter.create(loc, f16_ty, v3); + return convertFp16x4ToFp8x4(loc, rewriter, c0, c1, c2, c3); + } + + static Value convertBf16ToFp32(Location loc, + ConversionPatternRewriter &rewriter, + const Value &v) { + PTXBuilder builder; + auto &cvt = *builder.create("cvt.rn.f32.bf16"); + auto res = builder.newOperand("=r"); + auto operand = builder.newOperand(v, "h"); + cvt(res, operand); + return builder.launch(rewriter, loc, f32_ty, false); + } + + static Value convertFp32ToBf16(Location loc, + ConversionPatternRewriter &rewriter, + const Value &v) { + PTXBuilder builder; + auto &cvt = *builder.create("cvt.rn.bf16.f32"); + auto res = builder.newOperand("=h"); + auto operand = builder.newOperand(v, "r"); + cvt(res, operand); + // TODO: This is a hack to get the right type. 
We should be able to invoke + // the type converter + return builder.launch(rewriter, loc, i16_ty, false); + } + + LogicalResult + matchAndRewrite(triton::FpToFpOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + auto srcTensorType = op.from().getType().cast(); + auto dstTensorType = op.result().getType().cast(); + auto srcEltType = srcTensorType.getElementType(); + auto dstEltType = dstTensorType.getElementType(); + auto loc = op->getLoc(); + auto elems = getElemsPerThread(dstTensorType); + SmallVector resultVals; + + // Select convertor + if (srcEltType.isa() || + dstEltType.isa()) { + std::function(Location, ConversionPatternRewriter &, + const Value &, const Value &, + const Value &, const Value &)> + convertor; + if (srcEltType.isa() && dstEltType.isF16()) { + convertor = convertFp8x4ToFp16x4; + } else if (srcEltType.isF16() && dstEltType.isa()) { + convertor = convertFp16x4ToFp8x4; + } else if (srcEltType.isa() && dstEltType.isBF16()) { + convertor = convertFp8x4ToBf16x4; + } else if (srcEltType.isBF16() && dstEltType.isa()) { + convertor = convertBf16x4ToFp8x4; + } else if (srcEltType.isa() && dstEltType.isF32()) { + convertor = convertFp8x4ToFp32x4; + } else if (srcEltType.isF32() && dstEltType.isa()) { + convertor = convertFp32x4ToFp8x4; + } else if (srcEltType.isa() && dstEltType.isF64()) { + convertor = convertFp8x4ToFp64x4; + } else if (srcEltType.isF64() && dstEltType.isa()) { + convertor = convertFp64x4ToFp8x4; + } else { + assert(false && "unsupported fp8 casting"); + } + + // Vectorized casting + assert(elems % 4 == 0 && + "FP8 casting only support tensors with 4-aligned sizes"); + auto elements = getElementsFromStruct(loc, adaptor.from(), rewriter); + for (size_t i = 0; i < elems; i += 4) { + auto converted = convertor(loc, rewriter, elements[i], elements[i + 1], + elements[i + 2], elements[i + 3]); + resultVals.append(converted); + } + } else if (srcEltType.isBF16() && dstEltType.isF32()) { + resultVals.emplace_back(convertBf16ToFp32(loc, rewriter, adaptor.from())); + } else if (srcEltType.isF32() && dstEltType.isBF16()) { + resultVals.emplace_back(convertFp32ToBf16(loc, rewriter, adaptor.from())); + } else { + assert(false && "unsupported type casting"); + } + + assert(resultVals.size() == elems); + auto convertedDstTensorType = + this->getTypeConverter()->convertType(dstTensorType); + auto result = getStructFromElements(loc, resultVals, rewriter, + convertedDstTensorType); + rewriter.replaceOp(op, result); + return success(); + } +}; + +template +class ElementwiseOpConversionBase + : public ConvertTritonGPUOpToLLVMPattern { +public: + using OpAdaptor = typename SourceOp::Adaptor; + + explicit ElementwiseOpConversionBase(LLVMTypeConverter &typeConverter, + PatternBenefit benefit = 1) + : ConvertTritonGPUOpToLLVMPattern(typeConverter, benefit) {} + + LogicalResult + matchAndRewrite(SourceOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + auto resultTy = op.getType(); + Location loc = op->getLoc(); + + unsigned elems = getElemsPerThread(resultTy); + auto resultElementTy = getElementTypeOrSelf(resultTy); + Type elemTy = this->getTypeConverter()->convertType(resultElementTy); + SmallVector types(elems, elemTy); + Type structTy = this->getTypeConverter()->convertType(resultTy); + + auto *concreteThis = static_cast(this); + auto operands = getOperands(rewriter, adaptor, elems, loc); + SmallVector resultVals(elems); + for (unsigned i = 0; i < elems; ++i) { + resultVals[i] = concreteThis->createDestOp(op, adaptor, 
rewriter, elemTy, + operands[i], loc); + if (!bool(resultVals[i])) + return failure(); + } + Value view = getStructFromElements(loc, resultVals, rewriter, structTy); + rewriter.replaceOp(op, view); + + return success(); + } + +protected: + SmallVector> + getOperands(ConversionPatternRewriter &rewriter, OpAdaptor adaptor, + const unsigned elems, Location loc) const { + SmallVector> operands(elems); + for (auto operand : adaptor.getOperands()) { + auto sub_operands = getElementsFromStruct(loc, operand, rewriter); + for (size_t i = 0; i < elems; ++i) { + operands[i].push_back(sub_operands[i]); + } + } + return operands; + } +}; + +template +struct ElementwiseOpConversion + : public ElementwiseOpConversionBase< + SourceOp, ElementwiseOpConversion> { + using Base = + ElementwiseOpConversionBase>; + using Base::Base; + using OpAdaptor = typename Base::OpAdaptor; + + explicit ElementwiseOpConversion(LLVMTypeConverter &typeConverter, + PatternBenefit benefit = 1) + : ElementwiseOpConversionBase( + typeConverter, benefit) {} + + // An interface to support variant DestOp builder. + DestOp createDestOp(SourceOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter, Type elemTy, + ValueRange operands, Location loc) const { + return rewriter.create(loc, elemTy, operands, + adaptor.getAttributes().getValue()); + } +}; + +struct CmpIOpConversion + : public ElementwiseOpConversionBase { + using Base = + ElementwiseOpConversionBase; + using Base::Base; + using Adaptor = typename Base::OpAdaptor; + + // An interface to support variant DestOp builder. + LLVM::ICmpOp createDestOp(triton::gpu::CmpIOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter, Type elemTy, + ValueRange operands, Location loc) const { + return rewriter.create( + loc, elemTy, ArithCmpIPredicateToLLVM(op.predicate()), operands[0], + operands[1]); + } + + static LLVM::ICmpPredicate + ArithCmpIPredicateToLLVM(arith::CmpIPredicate predicate) { + switch (predicate) { +#define __PRED_ENUM(item__) \ + case arith::CmpIPredicate::item__: \ + return LLVM::ICmpPredicate::item__ + + __PRED_ENUM(eq); + __PRED_ENUM(ne); + __PRED_ENUM(sgt); + __PRED_ENUM(sge); + __PRED_ENUM(slt); + __PRED_ENUM(sle); + __PRED_ENUM(ugt); + __PRED_ENUM(uge); + __PRED_ENUM(ult); + __PRED_ENUM(ule); + +#undef __PRED_ENUM + } + return LLVM::ICmpPredicate::eq; + } +}; + +struct CmpFOpConversion + : public ElementwiseOpConversionBase { + using Base = + ElementwiseOpConversionBase; + using Base::Base; + using Adaptor = typename Base::OpAdaptor; + + // An interface to support variant DestOp builder. 
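For reference, a standalone sketch of what the __PRED_ENUM expansion in CmpIOpConversion above produces: a one-to-one switch from the arith predicate enum to the LLVM dialect predicate enum. Stand-in enums are used so the snippet compiles on its own; the floating-point variant that follows uses the same pattern with a two-argument macro because the enumerator spellings differ.

#include <cstdio>

enum class ArithCmpI { eq, ne, sgt, sge, slt, sle, ugt, uge, ult, ule };
enum class LLVMICmp { eq, ne, sgt, sge, slt, sle, ugt, uge, ult, ule };

LLVMICmp toLLVM(ArithCmpI p) {
  switch (p) {
#define __PRED_ENUM(item__)                                                    \
  case ArithCmpI::item__:                                                      \
    return LLVMICmp::item__
    __PRED_ENUM(eq);  __PRED_ENUM(ne);  __PRED_ENUM(sgt); __PRED_ENUM(sge);
    __PRED_ENUM(slt); __PRED_ENUM(sle); __PRED_ENUM(ugt); __PRED_ENUM(uge);
    __PRED_ENUM(ult); __PRED_ENUM(ule);
#undef __PRED_ENUM
  }
  return LLVMICmp::eq; // unreachable fallback, mirroring the patch
}

int main() {
  std::printf("slt maps to %d\n", static_cast<int>(toLLVM(ArithCmpI::slt)));
  return 0;
}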
+ static LLVM::FCmpOp createDestOp(triton::gpu::CmpFOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter, + Type elemTy, ValueRange operands, + Location loc) { + return rewriter.create( + loc, elemTy, ArithCmpFPredicateToLLVM(op.predicate()), operands[0], + operands[1]); + } + + static LLVM::FCmpPredicate + ArithCmpFPredicateToLLVM(arith::CmpFPredicate predicate) { + switch (predicate) { +#define __PRED_ENUM(item__, item1__) \ + case arith::CmpFPredicate::item__: \ + return LLVM::FCmpPredicate::item1__ + + __PRED_ENUM(OEQ, oeq); + __PRED_ENUM(ONE, one); + __PRED_ENUM(OGT, ogt); + __PRED_ENUM(OGE, oge); + __PRED_ENUM(OLT, olt); + __PRED_ENUM(OLE, ole); + __PRED_ENUM(ORD, ord); + __PRED_ENUM(UEQ, ueq); + __PRED_ENUM(UGT, ugt); + __PRED_ENUM(UGE, uge); + __PRED_ENUM(ULT, ult); + __PRED_ENUM(ULE, ule); + __PRED_ENUM(UNE, une); + __PRED_ENUM(UNO, uno); + __PRED_ENUM(AlwaysTrue, _true); + __PRED_ENUM(AlwaysFalse, _false); + +#undef __PRED_ENUM + } + return LLVM::FCmpPredicate::_true; + } +}; + +struct ExtElemwiseOpConversion + : public ElementwiseOpConversionBase { + using Base = ElementwiseOpConversionBase; + using Base::Base; + using Adaptor = typename Base::OpAdaptor; + + Value createDestOp(triton::ExtElemwiseOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter, Type elemTy, + ValueRange operands, Location loc) const { + StringRef funcName = op.symbol(); + if (funcName.empty()) + llvm::errs() << "ExtElemwiseOpConversion"; + + Type funcType = getFunctionType(elemTy, operands); + LLVM::LLVMFuncOp funcOp = + appendOrGetFuncOp(rewriter, op, funcName, funcType); + return rewriter.create(loc, funcOp, operands).getResult(0); + } + +private: + Type getFunctionType(Type resultType, ValueRange operands) const { + SmallVector operandTypes(operands.getTypes()); + return LLVM::LLVMFunctionType::get(resultType, operandTypes); + } + + LLVM::LLVMFuncOp appendOrGetFuncOp(ConversionPatternRewriter &rewriter, + triton::ExtElemwiseOp op, + StringRef funcName, Type funcType) const { + using LLVM::LLVMFuncOp; + + auto funcAttr = StringAttr::get(op->getContext(), funcName); + Operation *funcOp = SymbolTable::lookupNearestSymbolFrom(op, funcAttr); + if (funcOp) + return cast(*funcOp); + + mlir::OpBuilder b(op->getParentOfType()); + auto ret = b.create(op->getLoc(), funcName, funcType); + ret.getOperation()->setAttr( + "libname", StringAttr::get(op->getContext(), op.libname())); + ret.getOperation()->setAttr( + "libpath", StringAttr::get(op->getContext(), op.libpath())); + return ret; + } +}; + +struct FDivOpConversion + : ElementwiseOpConversionBase { + using Base = + ElementwiseOpConversionBase; + using Base::Base; + using Adaptor = typename Base::OpAdaptor; + + Value createDestOp(mlir::arith::DivFOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter, Type elemTy, + ValueRange operands, Location loc) const { + PTXBuilder ptxBuilder; + auto &fdiv = *ptxBuilder.create("div"); + unsigned bitwidth = elemTy.getIntOrFloatBitWidth(); + if (32 == bitwidth) { + fdiv.o("full").o("f32"); + } else if (64 == bitwidth) { + fdiv.o("rn").o("f64"); + } else { + assert(0 && bitwidth && "not supported"); + } + + auto res = ptxBuilder.newOperand(bitwidth == 32 ? "=r" : "=l"); + auto lhs = ptxBuilder.newOperand(operands[0], bitwidth == 32 ? "r" : "l"); + auto rhs = ptxBuilder.newOperand(operands[1], bitwidth == 32 ? 
"r" : "l"); + fdiv(res, lhs, rhs); + + Value ret = ptxBuilder.launch(rewriter, loc, elemTy, false); + return ret; + } +}; + +struct FMulOpConversion + : ElementwiseOpConversionBase { + using Base = + ElementwiseOpConversionBase; + using Base::Base; + using Adaptor = typename Base::OpAdaptor; + + Value createDestOp(mlir::arith::MulFOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter, Type elemTy, + ValueRange operands, Location loc) const { + auto lhsElemTy = getElementType(op.getLhs()); + auto rhsElemTy = getElementType(op.getRhs()); + if (lhsElemTy.isBF16() && rhsElemTy.isBF16()) { + PTXBuilder builder; + auto ptxAsm = " { .reg .b16 c; \n" + " mov.b16 c, 0x8000U; \n" // 0.0 + " fma.rn.bf16 $0, $1, $2, c; } \n"; + auto &fMul = *builder.create(ptxAsm); + auto res = builder.newOperand("=h"); + auto lhs = builder.newOperand(operands[0], "h"); + auto rhs = builder.newOperand(operands[1], "h"); + fMul({res, lhs, rhs}, /*onlyAttachMLIRArgs=*/true); + return builder.launch(rewriter, loc, i16_ty, false); + } else { + return rewriter.create(loc, elemTy, operands[0], + operands[1]); + } + } +}; + +struct FAddOpConversion + : ElementwiseOpConversionBase { + using Base = + ElementwiseOpConversionBase; + using Base::Base; + using Adaptor = typename Base::OpAdaptor; + + Value createDestOp(mlir::arith::AddFOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter, Type elemTy, + ValueRange operands, Location loc) const { + auto lhsElemTy = getElementType(op.getLhs()); + auto rhsElemTy = getElementType(op.getRhs()); + if (lhsElemTy.isBF16() && rhsElemTy.isBF16()) { + PTXBuilder builder; + auto ptxAsm = "{ .reg .b16 c; \n" + " mov.b16 c, 0x3f80U; \n" // 1.0 + " fma.rn.bf16 $0, $1, c, $2; } \n"; + auto &fAdd = *builder.create(ptxAsm); + auto res = builder.newOperand("=h"); + auto lhs = builder.newOperand(operands[0], "h"); + auto rhs = builder.newOperand(operands[1], "h"); + fAdd({res, lhs, rhs}, /*onlyAttachMLIRArgs=*/true); + return builder.launch(rewriter, loc, i16_ty, false); + } else { + return rewriter.create(loc, elemTy, operands[0], + operands[1]); + } + } +}; + +struct FSubOpConversion + : ElementwiseOpConversionBase { + using Base = + ElementwiseOpConversionBase; + using Base::Base; + using Adaptor = typename Base::OpAdaptor; + + Value createDestOp(mlir::arith::SubFOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter, Type elemTy, + ValueRange operands, Location loc) const { + auto lhsElemTy = getElementType(op.getLhs()); + auto rhsElemTy = getElementType(op.getRhs()); + if (lhsElemTy.isBF16() && rhsElemTy.isBF16()) { + PTXBuilder builder; + auto ptxAsm = " { .reg .b16 c; \n" + " mov.b16 c, 0xbf80U; \n" // -1.0 + " fma.rn.bf16 $0, $2, c, $1;} \n"; + auto &fSub = *builder.create(ptxAsm); + auto res = builder.newOperand("=h"); + auto lhs = builder.newOperand(operands[0], "h"); + auto rhs = builder.newOperand(operands[1], "h"); + fSub({res, lhs, rhs}, /*onlyAttachMLIRArgs=*/true); + return builder.launch(rewriter, loc, i16_ty, false); + } else { + return rewriter.create(loc, elemTy, operands[0], + operands[1]); + } + } +}; + +struct SIToFPOpConversion + : ElementwiseOpConversionBase { + using Base = + ElementwiseOpConversionBase; + using Base::Base; + using Adaptor = typename Base::OpAdaptor; + + Value createDestOp(mlir::arith::SIToFPOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter, Type elemTy, + ValueRange operands, Location loc) const { + auto outElemTy = getElementType(op.getOut()); + if (outElemTy.isBF16()) { + auto value = rewriter.create(loc, f32_ty, 
operands[0]); + return FpToFpOpConversion::convertFp32ToBf16(loc, rewriter, value); + } else { + return rewriter.create(loc, elemTy, operands[0]); + } + } +}; + +struct FPToSIOpConversion + : ElementwiseOpConversionBase { + using Base = + ElementwiseOpConversionBase; + using Base::Base; + using Adaptor = typename Base::OpAdaptor; + + Value createDestOp(mlir::arith::FPToSIOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter, Type elemTy, + ValueRange operands, Location loc) const { + auto inElemTy = getElementType(op.getIn()); + if (inElemTy.isBF16()) { + auto value = + FpToFpOpConversion::convertBf16ToFp32(loc, rewriter, operands[0]); + return rewriter.create(loc, elemTy, value); + } else { + return rewriter.create(loc, elemTy, operands[0]); + } + } +}; + +struct ExtFOpConversion + : ElementwiseOpConversionBase { + using Base = + ElementwiseOpConversionBase; + using Base::Base; + using Adaptor = typename Base::OpAdaptor; + + Value createDestOp(mlir::arith::ExtFOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter, Type elemTy, + ValueRange operands, Location loc) const { + auto inElemTy = getElementType(op.getIn()); + if (inElemTy.isBF16()) { + auto outElemTy = getElementType(op.getOut()); + assert(outElemTy.isF32() && "unsupported conversion"); + return FpToFpOpConversion::convertBf16ToFp32(loc, rewriter, operands[0]); + } else { + return rewriter.create(loc, elemTy, operands[0]); + } + } +}; + +struct TruncFOpConversion + : ElementwiseOpConversionBase { + using Base = + ElementwiseOpConversionBase; + using Base::Base; + using Adaptor = typename Base::OpAdaptor; + + Value createDestOp(mlir::arith::TruncFOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter, Type elemTy, + ValueRange operands, Location loc) const { + auto outElemTy = getElementType(op.getOut()); + if (outElemTy.isBF16()) { + auto inElemTy = getElementType(op.getIn()); + assert(inElemTy.isF32() && "unsupported conversion"); + return FpToFpOpConversion::convertFp32ToBf16(loc, rewriter, operands[0]); + } else { + return rewriter.create(loc, elemTy, operands[0]); + } + } +}; + +struct ExpOpConversionApprox + : ElementwiseOpConversionBase { + using Base = + ElementwiseOpConversionBase; + using Base::Base; + using Adaptor = typename Base::OpAdaptor; + + Value createDestOp(mlir::math::ExpOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter, Type elemTy, + ValueRange operands, Location loc) const { + // For FP64 input, call __nv_expf for higher-precision calculation + if (elemTy.getIntOrFloatBitWidth() == 64) + return {}; + + const double log2e = 1.4426950408889634; + Value prod = fmul(f32_ty, operands[0], f32_val(log2e)); + + PTXBuilder ptxBuilder; + auto &exp2 = ptxBuilder.create("ex2")->o("approx").o("f32"); + auto output = ptxBuilder.newOperand("=f"); + auto input = ptxBuilder.newOperand(prod, "f"); + exp2(output, input); + return ptxBuilder.launch(rewriter, loc, f32_ty, false); + } +}; + +void populateElementwiseOpToLLVMPatterns(mlir::LLVMTypeConverter &typeConverter, + RewritePatternSet &patterns, + int numWarps, + AxisInfoAnalysis &axisInfoAnalysis, + const Allocation *allocation, + Value smem, PatternBenefit benefit) { +#define POPULATE_TERNARY_OP(SRC_OP, DST_OP) \ + patterns.add>(typeConverter, benefit); + POPULATE_TERNARY_OP(triton::gpu::SelectOp, LLVM::SelectOp) +#undef POPULATE_TERNARY_OP + +#define POPULATE_BINARY_OP(SRC_OP, DST_OP) \ + patterns.add>(typeConverter, benefit); + POPULATE_BINARY_OP(arith::SubIOp, LLVM::SubOp) // - + POPULATE_BINARY_OP(arith::AddIOp, LLVM::AddOp) 
// + + POPULATE_BINARY_OP(arith::MulIOp, LLVM::MulOp) // * + POPULATE_BINARY_OP(arith::DivSIOp, LLVM::SDivOp) + POPULATE_BINARY_OP(arith::DivUIOp, LLVM::UDivOp) + POPULATE_BINARY_OP(arith::RemFOp, LLVM::FRemOp) // % + POPULATE_BINARY_OP(arith::RemSIOp, LLVM::SRemOp) + POPULATE_BINARY_OP(arith::RemUIOp, LLVM::URemOp) + POPULATE_BINARY_OP(arith::AndIOp, LLVM::AndOp) // & + POPULATE_BINARY_OP(arith::OrIOp, LLVM::OrOp) // | + POPULATE_BINARY_OP(arith::XOrIOp, LLVM::XOrOp) // ^ + POPULATE_BINARY_OP(arith::ShLIOp, LLVM::ShlOp) // << + POPULATE_BINARY_OP(arith::ShRSIOp, LLVM::AShrOp) // >> + POPULATE_BINARY_OP(arith::ShRUIOp, LLVM::LShrOp) // >> +#undef POPULATE_BINARY_OP + +#define POPULATE_UNARY_OP(SRC_OP, DST_OP) \ + patterns.add>(typeConverter, benefit); + POPULATE_UNARY_OP(arith::TruncIOp, LLVM::TruncOp) + POPULATE_UNARY_OP(arith::ExtSIOp, LLVM::SExtOp) + POPULATE_UNARY_OP(arith::ExtUIOp, LLVM::ZExtOp) + POPULATE_UNARY_OP(arith::FPToUIOp, LLVM::FPToUIOp) + POPULATE_UNARY_OP(arith::UIToFPOp, LLVM::UIToFPOp) + POPULATE_UNARY_OP(math::LogOp, math::LogOp) + POPULATE_UNARY_OP(math::CosOp, math::CosOp) + POPULATE_UNARY_OP(math::SinOp, math::SinOp) + POPULATE_UNARY_OP(math::SqrtOp, math::SqrtOp) + POPULATE_UNARY_OP(math::ExpOp, math::ExpOp) + POPULATE_UNARY_OP(triton::BitcastOp, LLVM::BitcastOp) + POPULATE_UNARY_OP(triton::IntToPtrOp, LLVM::IntToPtrOp) + POPULATE_UNARY_OP(triton::PtrToIntOp, LLVM::PtrToIntOp) +#undef POPULATE_UNARY_OP + + patterns.add(typeConverter, benefit); + patterns.add(typeConverter, benefit); + + patterns.add(typeConverter, benefit); + patterns.add(typeConverter, benefit); + patterns.add(typeConverter, benefit); + patterns.add(typeConverter, benefit); + + patterns.add(typeConverter, benefit); + patterns.add(typeConverter, benefit); + patterns.add(typeConverter, benefit); + patterns.add(typeConverter, benefit); + + patterns.add(typeConverter, benefit); + + patterns.add(typeConverter, benefit); + // ExpOpConversionApprox will try using ex2.approx if the input type is FP32. 
+ // For FP64 input type, ExpOpConversionApprox will return failure and + // ElementwiseOpConversion defined below will call + // __nv_expf for higher-precision calculation + patterns.add(typeConverter, benefit); +} diff --git a/lib/Conversion/TritonGPUToLLVM/ElementwiseOpToLLVM.h b/lib/Conversion/TritonGPUToLLVM/ElementwiseOpToLLVM.h new file mode 100644 index 000000000000..430fada0730c --- /dev/null +++ b/lib/Conversion/TritonGPUToLLVM/ElementwiseOpToLLVM.h @@ -0,0 +1,16 @@ +#ifndef TRITON_CONVERSION_TRITONGPU_TO_ELEMENTWISE_OP_H +#define TRITON_CONVERSION_TRITONGPU_TO_ELEMENTWISE_OP_H + +#include "TritonGPUToLLVMBase.h" + +using namespace mlir; +using namespace mlir::triton; + +void populateElementwiseOpToLLVMPatterns(mlir::LLVMTypeConverter &typeConverter, + RewritePatternSet &patterns, + int numWarps, + AxisInfoAnalysis &axisInfoAnalysis, + const Allocation *allocation, + Value smem, PatternBenefit benefit); + +#endif diff --git a/lib/Conversion/TritonGPUToLLVM/LoadStoreOpToLLVM.cpp b/lib/Conversion/TritonGPUToLLVM/LoadStoreOpToLLVM.cpp new file mode 100644 index 000000000000..97ce9457a6d3 --- /dev/null +++ b/lib/Conversion/TritonGPUToLLVM/LoadStoreOpToLLVM.cpp @@ -0,0 +1,884 @@ +#include "mlir/IR/Matchers.h" +#include "mlir/IR/TypeUtilities.h" + +#include "ConvertLayoutOpToLLVM.h" +#include "LoadStoreOpToLLVM.h" + +using namespace mlir; +using namespace mlir::triton; + +using ::mlir::LLVM::getElementsFromStruct; +using ::mlir::LLVM::getSharedMemoryObjectFromStruct; +using ::mlir::LLVM::getStructFromElements; +using ::mlir::triton::gpu::getElemsPerThread; +using ::mlir::triton::gpu::SharedEncodingAttr; + +// Contains some helper functions for both Load and Store conversions. +struct LoadStoreConversionBase : public ConvertTritonGPUOpToLLVMPatternBase { + explicit LoadStoreConversionBase(AxisInfoAnalysis &axisAnalysisPass) + : axisAnalysisPass(axisAnalysisPass) {} + + // Get corresponding LLVM element values of \param value. 
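+  // After type conversion, a tensor held by a thread is an LLVM struct with
+  // one field per element, so this unpacks that struct; a plain scalar
+  // (e.g. a single pointer) is returned as a one-element vector.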
+ static SmallVector getLLVMElems(Value value, Value llValue, + ConversionPatternRewriter &rewriter, + Location loc) { + if (!value) + return {}; + if (!llValue.getType().isa()) + return {llValue}; + // Here, we assume that all inputs should have a blockedLayout + auto valueVals = getElementsFromStruct(loc, llValue, rewriter); + return valueVals; + } + + unsigned getVectorSize(Value ptr) const { + return axisAnalysisPass.getPtrVectorSize(ptr); + } + + unsigned getMaskAlignment(Value mask) const { + return axisAnalysisPass.getMaskAlignment(mask); + } + +protected: + AxisInfoAnalysis &axisAnalysisPass; +}; + +struct LoadOpConversion + : public ConvertTritonGPUOpToLLVMPattern, + public LoadStoreConversionBase { + using ConvertTritonGPUOpToLLVMPattern< + triton::LoadOp>::ConvertTritonGPUOpToLLVMPattern; + + LoadOpConversion(LLVMTypeConverter &converter, + AxisInfoAnalysis &axisAnalysisPass, PatternBenefit benefit) + : ConvertTritonGPUOpToLLVMPattern(converter, benefit), + LoadStoreConversionBase(axisAnalysisPass) {} + + LogicalResult + matchAndRewrite(triton::LoadOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + auto loc = op->getLoc(); + + // original values + Value ptr = op.ptr(); + Value mask = op.mask(); + Value other = op.other(); + + // adaptor values + Value llPtr = adaptor.ptr(); + Value llMask = adaptor.mask(); + Value llOther = adaptor.other(); + + // Determine the vectorization size + Type valueTy = op.getResult().getType(); + Type valueElemTy = + typeConverter->convertType(getElementTypeOrSelf(valueTy)); + unsigned vec = getVectorSize(ptr); + unsigned numElems = getElemsPerThread(ptr.getType()); + if (llMask) + vec = std::min(vec, getMaskAlignment(mask)); + + // Get the LLVM values for pointers + auto ptrElems = getLLVMElems(ptr, llPtr, rewriter, loc); + assert(ptrElems.size() == numElems); + + // Get the LLVM values for mask + SmallVector maskElems; + if (llMask) { + maskElems = getLLVMElems(mask, llMask, rewriter, loc); + assert(maskElems.size() == numElems); + } + + // Get the LLVM values for `other` + // TODO: (goostavz) handle when other is const but not splat, which + // should be rarely seen + bool otherIsSplatConstInt = false; + DenseElementsAttr constAttr; + int64_t splatVal = 0; + if (other && valueElemTy.isa() && + matchPattern(other, m_Constant(&constAttr)) && constAttr.isSplat()) { + otherIsSplatConstInt = true; + splatVal = constAttr.getSplatValue().getSExtValue(); + } + auto otherElems = getLLVMElems(other, llOther, rewriter, loc); + + // vectorized iteration through all the pointer/mask/other elements + const int valueElemNbits = + std::max(8u, valueElemTy.getIntOrFloatBitWidth()); + const int numVecs = numElems / vec; + + SmallVector loadedVals; + for (size_t vecStart = 0; vecStart < numElems; vecStart += vec) { + // TODO: optimization when ptr is GEP with constant offset + size_t in_off = 0; + + const size_t maxWordWidth = std::max(32, valueElemNbits); + const size_t totalWidth = valueElemNbits * vec; + const size_t width = std::min(totalWidth, maxWordWidth); + const size_t nWords = std::max(1, totalWidth / width); + const size_t wordNElems = width / valueElemNbits; + assert(wordNElems * nWords * numVecs == numElems); + + // TODO(Superjomn) Add cache policy fields to StoreOp. + // TODO(Superjomn) Deal with cache policy here. + const bool hasL2EvictPolicy = false; + + PTXBuilder ptxBuilder; + + Value pred = mask ? maskElems[vecStart] : int_val(1, 1); + + const std::string readConstraint = + (width == 64) ? "l" : ((width == 32) ? 
"r" : "c"); + const std::string writeConstraint = + (width == 64) ? "=l" : ((width == 32) ? "=r" : "=c"); + + // prepare asm operands + auto *dstsOpr = ptxBuilder.newListOperand(); + for (size_t wordIdx = 0; wordIdx < nWords; ++wordIdx) { + auto *opr = ptxBuilder.newOperand(writeConstraint); // =r operations + dstsOpr->listAppend(opr); + } + + auto *addrOpr = + ptxBuilder.newAddrOperand(ptrElems[vecStart], "l", in_off); + + // Define the instruction opcode + auto &ld = ptxBuilder.create<>("ld") + ->o("volatile", op.isVolatile()) + .global() + .o("ca", op.cache() == triton::CacheModifier::CA) + .o("cg", op.cache() == triton::CacheModifier::CG) + .o("L1::evict_first", + op.evict() == triton::EvictionPolicy::EVICT_FIRST) + .o("L1::evict_last", + op.evict() == triton::EvictionPolicy::EVICT_LAST) + .o("L1::cache_hint", hasL2EvictPolicy) + .v(nWords) + .b(width); + + PTXBuilder::Operand *evictOpr{}; + + // Here lack a mlir::Value to bind to this operation, so disabled. + // if (has_l2_evict_policy) + // evictOpr = ptxBuilder.newOperand(l2Evict, "l"); + + if (!evictOpr) + ld(dstsOpr, addrOpr).predicate(pred, "b"); + else + ld(dstsOpr, addrOpr, evictOpr).predicate(pred, "b"); + + if (other) { + for (size_t ii = 0; ii < nWords; ++ii) { + // PTX doesn't support mov.u8, so we need to use mov.u16 + auto movWidth = width < 16 ? 16 : width; + PTXInstr &mov = + ptxBuilder.create<>("mov")->o("u" + std::to_string(movWidth)); + + size_t size = width / valueElemNbits; + + auto vecTy = LLVM::getFixedVectorType(valueElemTy, size); + Value v = undef(vecTy); + for (size_t s = 0; s < size; ++s) { + Value falseVal = otherElems[vecStart + ii * size + s]; + Value sVal = createIndexAttrConstant( + rewriter, loc, this->getTypeConverter()->getIndexType(), s); + v = insert_element(vecTy, v, falseVal, sVal); + } + v = bitcast(v, IntegerType::get(getContext(), width)); + + PTXInstr::Operand *opr{}; + if (otherIsSplatConstInt) + opr = ptxBuilder.newConstantOperand(splatVal); + else + opr = ptxBuilder.newOperand(v, readConstraint); + + mov(dstsOpr->listGet(ii), opr).predicateNot(pred, "b"); + } + } + + // Create inline ASM signature + SmallVector retTys(nWords, IntegerType::get(getContext(), width)); + Type retTy = retTys.size() > 1 + ? 
LLVM::LLVMStructType::getLiteral(getContext(), retTys) + : retTys[0]; + + // TODO: if (has_l2_evict_policy) + // auto asmDialectAttr = + // LLVM::AsmDialectAttr::get(rewriter.getContext(), + // LLVM::AsmDialect::AD_ATT); + Value ret = ptxBuilder.launch(rewriter, loc, retTy); + + // Extract and store return values + SmallVector rets; + for (unsigned int ii = 0; ii < nWords; ++ii) { + Value curr; + if (retTy.isa()) { + curr = extract_val(IntegerType::get(getContext(), width), ret, + rewriter.getI64ArrayAttr(ii)); + } else { + curr = ret; + } + curr = bitcast(curr, LLVM::getFixedVectorType(valueElemTy, + width / valueElemNbits)); + rets.push_back(curr); + } + int tmp = width / valueElemNbits; + for (size_t ii = 0; ii < vec; ++ii) { + Value vecIdx = createIndexAttrConstant( + rewriter, loc, this->getTypeConverter()->getIndexType(), ii % tmp); + Value loaded = extract_element(valueElemTy, rets[ii / tmp], vecIdx); + loadedVals.push_back(loaded); + } + } // end vec + + Type llvmResultStructTy = getTypeConverter()->convertType(valueTy); + Value resultStruct = + getStructFromElements(loc, loadedVals, rewriter, llvmResultStructTy); + rewriter.replaceOp(op, {resultStruct}); + return success(); + } +}; + +struct StoreOpConversion + : public ConvertTritonGPUOpToLLVMPattern, + public LoadStoreConversionBase { + using ConvertTritonGPUOpToLLVMPattern< + triton::StoreOp>::ConvertTritonGPUOpToLLVMPattern; + + StoreOpConversion(LLVMTypeConverter &converter, + AxisInfoAnalysis &axisAnalysisPass, PatternBenefit benefit) + : ConvertTritonGPUOpToLLVMPattern(converter, benefit), + LoadStoreConversionBase(axisAnalysisPass) {} + + LogicalResult + matchAndRewrite(triton::StoreOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + Value ptr = op.ptr(); + Value mask = op.mask(); + Value value = op.value(); + + Value llPtr = adaptor.ptr(); + Value llMask = adaptor.mask(); + Value llValue = adaptor.value(); + + auto loc = op->getLoc(); + MLIRContext *ctx = rewriter.getContext(); + + auto valueTy = value.getType(); + Type valueElemTy = + typeConverter->convertType(getElementTypeOrSelf(valueTy)); + + unsigned vec = getVectorSize(ptr); + unsigned numElems = getElemsPerThread(ptr.getType()); + + auto ptrElems = getLLVMElems(ptr, llPtr, rewriter, loc); + auto valueElems = getLLVMElems(value, llValue, rewriter, loc); + assert(ptrElems.size() == valueElems.size()); + + // Determine the vectorization size + SmallVector maskElems; + if (llMask) { + maskElems = getLLVMElems(mask, llMask, rewriter, loc); + assert(valueElems.size() == maskElems.size()); + + unsigned maskAlign = getMaskAlignment(mask); + vec = std::min(vec, maskAlign); + } + + const size_t dtsize = + std::max(1, valueElemTy.getIntOrFloatBitWidth() / 8); + const size_t valueElemNbits = dtsize * 8; + + const int numVecs = numElems / vec; + for (size_t vecStart = 0; vecStart < numElems; vecStart += vec) { + // TODO: optimization when ptr is AddPtr with constant offset + size_t in_off = 0; + + const size_t maxWordWidth = std::max(32, valueElemNbits); + const size_t totalWidth = valueElemNbits * vec; + const size_t width = std::min(totalWidth, maxWordWidth); + const size_t nWords = std::max(1, totalWidth / width); + const size_t wordNElems = width / valueElemNbits; + assert(wordNElems * nWords * numVecs == numElems); + + // TODO(Superjomn) Add cache policy fields to StoreOp. + // TODO(Superjomn) Deal with cache policy here. 
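+      // Rough example of the word splitting above, assuming vec = 8 f16
+      // elements: totalWidth = 128, width = min(128, 32) = 32, nWords = 4,
+      // wordNElems = 2, so this iteration emits a single `st.global.v4.b32`
+      // whose four 32-bit register operands each pack two f16 values.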
+ + Type valArgTy = IntegerType::get(ctx, width); + auto wordTy = vec_ty(valueElemTy, wordNElems); + + SmallVector> asmArgs; + for (size_t wordIdx = 0; wordIdx < nWords; ++wordIdx) { + // llWord is a width-len composition + Value llWord = undef(wordTy); + // Insert each value element to the composition + for (size_t elemIdx = 0; elemIdx < wordNElems; ++elemIdx) { + const size_t elemOffset = vecStart + wordIdx * wordNElems + elemIdx; + assert(elemOffset < valueElems.size()); + Value elem = valueElems[elemOffset]; + if (elem.getType().isInteger(1)) + elem = rewriter.create(loc, type::i8Ty(ctx), elem); + elem = bitcast(elem, valueElemTy); + + Type u32Ty = typeConverter->convertType(type::u32Ty(ctx)); + llWord = insert_element(wordTy, llWord, elem, i32_val(elemIdx)); + } + llWord = bitcast(llWord, valArgTy); + std::string constraint = + (width == 64) ? "l" : ((width == 32) ? "r" : "c"); + asmArgs.emplace_back(llWord, constraint); + } + + // Prepare the PTX inline asm. + PTXBuilder ptxBuilder; + auto *asmArgList = ptxBuilder.newListOperand(asmArgs); + + Value maskVal = llMask ? maskElems[vecStart] : int_val(1, 1); + + auto *asmAddr = + ptxBuilder.newAddrOperand(ptrElems[vecStart], "l", in_off); + + auto &ptxStoreInstr = + ptxBuilder.create<>("st")->global().v(nWords).b(width); + ptxStoreInstr(asmAddr, asmArgList).predicate(maskVal, "b"); + + Type boolTy = getTypeConverter()->convertType(rewriter.getIntegerType(1)); + llvm::SmallVector argTys({boolTy, ptr.getType()}); + argTys.insert(argTys.end(), nWords, valArgTy); + + auto asmReturnTy = void_ty(ctx); + + ptxBuilder.launch(rewriter, loc, asmReturnTy); + } + rewriter.eraseOp(op); + return success(); + } +}; + +struct AtomicCASOpConversion + : public ConvertTritonGPUOpToLLVMPattern, + public LoadStoreConversionBase { + using ConvertTritonGPUOpToLLVMPattern< + triton::AtomicCASOp>::ConvertTritonGPUOpToLLVMPattern; + + AtomicCASOpConversion(LLVMTypeConverter &converter, + const Allocation *allocation, Value smem, + AxisInfoAnalysis &axisAnalysisPass, + PatternBenefit benefit) + : ConvertTritonGPUOpToLLVMPattern( + converter, allocation, smem, benefit), + LoadStoreConversionBase(axisAnalysisPass) {} + + LogicalResult + matchAndRewrite(triton::AtomicCASOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + auto loc = op.getLoc(); + MLIRContext *ctx = rewriter.getContext(); + Value ptr = op.ptr(); + + Value llPtr = adaptor.ptr(); + Value llCmp = adaptor.cmp(); + Value llVal = adaptor.val(); + + auto ptrElements = getElementsFromStruct(loc, llPtr, rewriter); + auto cmpElements = getElementsFromStruct(loc, llCmp, rewriter); + auto valElements = getElementsFromStruct(loc, llVal, rewriter); + + auto valueTy = op.getResult().getType().dyn_cast(); + Type valueElemTy = + valueTy ? 
getTypeConverter()->convertType(valueTy.getElementType()) + : op.getResult().getType(); + auto tid = tid_val(); + Value pred = icmp_eq(tid, i32_val(0)); + PTXBuilder ptxBuilderMemfence; + auto memfence = ptxBuilderMemfence.create("membar")->o("gl"); + memfence(); + auto ASMReturnTy = void_ty(ctx); + ptxBuilderMemfence.launch(rewriter, loc, ASMReturnTy); + + Value atomPtr = getSharedMemoryBase(loc, rewriter, op.getOperation()); + atomPtr = bitcast(atomPtr, ptr_ty(valueElemTy, 3)); + + Value casPtr = ptrElements[0]; + Value casCmp = cmpElements[0]; + Value casVal = valElements[0]; + + PTXBuilder ptxBuilderAtomicCAS; + auto *dstOpr = ptxBuilderAtomicCAS.newOperand("=r"); + auto *ptrOpr = ptxBuilderAtomicCAS.newAddrOperand(casPtr, "l"); + auto *cmpOpr = ptxBuilderAtomicCAS.newOperand(casCmp, "r"); + auto *valOpr = ptxBuilderAtomicCAS.newOperand(casVal, "r"); + auto &atom = *ptxBuilderAtomicCAS.create("atom"); + atom.global().o("cas").o("b32"); + atom(dstOpr, ptrOpr, cmpOpr, valOpr).predicate(pred); + auto old = ptxBuilderAtomicCAS.launch(rewriter, loc, valueElemTy); + barrier(); + + PTXBuilder ptxBuilderStore; + auto *dstOprStore = ptxBuilderStore.newAddrOperand(atomPtr, "l"); + auto *valOprStore = ptxBuilderStore.newOperand(old, "r"); + auto &st = *ptxBuilderStore.create("st"); + st.shared().o("b32"); + st(dstOprStore, valOprStore).predicate(pred); + ptxBuilderStore.launch(rewriter, loc, ASMReturnTy); + ptxBuilderMemfence.launch(rewriter, loc, ASMReturnTy); + barrier(); + Value ret = load(atomPtr); + barrier(); + rewriter.replaceOp(op, {ret}); + return success(); + } +}; + +struct AtomicRMWOpConversion + : public ConvertTritonGPUOpToLLVMPattern, + public LoadStoreConversionBase { + using ConvertTritonGPUOpToLLVMPattern< + triton::AtomicRMWOp>::ConvertTritonGPUOpToLLVMPattern; + + AtomicRMWOpConversion(LLVMTypeConverter &converter, + const Allocation *allocation, Value smem, + AxisInfoAnalysis &axisAnalysisPass, + PatternBenefit benefit) + : ConvertTritonGPUOpToLLVMPattern( + converter, allocation, smem, benefit), + LoadStoreConversionBase(axisAnalysisPass) {} + + LogicalResult + matchAndRewrite(triton::AtomicRMWOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + auto loc = op.getLoc(); + MLIRContext *ctx = rewriter.getContext(); + + auto atomicRmwAttr = op.atomic_rmw_op(); + Value ptr = op.ptr(); + Value val = op.val(); + + Value llPtr = adaptor.ptr(); + Value llVal = adaptor.val(); + Value llMask = adaptor.mask(); + + auto valElements = getElementsFromStruct(loc, llVal, rewriter); + auto ptrElements = getElementsFromStruct(loc, llPtr, rewriter); + auto maskElements = getElementsFromStruct(loc, llMask, rewriter); + + auto valueTy = op.getResult().getType().dyn_cast(); + Type valueElemTy = + valueTy ? getTypeConverter()->convertType(valueTy.getElementType()) + : op.getResult().getType(); + const size_t valueElemNbits = valueElemTy.getIntOrFloatBitWidth(); + auto elemsPerThread = getElemsPerThread(val.getType()); + // vec = 1 for scalar + auto vec = getVectorSize(ptr); + Value mask = int_val(1, 1); + auto tid = tid_val(); + // tensor + if (valueTy) { + auto valTy = val.getType().cast(); + vec = std::min(vec, valTy.getElementType().isF16() ? 
2 : 1); + // mask + auto shape = valueTy.getShape(); + auto numElements = product(shape); + mask = and_(mask, icmp_slt(mul(tid, i32_val(elemsPerThread)), + i32_val(numElements))); + } + + auto vecTy = vec_ty(valueElemTy, vec); + SmallVector resultVals(elemsPerThread); + for (size_t i = 0; i < elemsPerThread; i += vec) { + Value rmwVal = undef(vecTy); + for (int ii = 0; ii < vec; ++ii) { + Value iiVal = createIndexAttrConstant( + rewriter, loc, getTypeConverter()->getIndexType(), ii); + rmwVal = insert_element(vecTy, rmwVal, valElements[i + ii], iiVal); + } + + Value rmwPtr = ptrElements[i]; + Value rmwMask = maskElements[i]; + rmwMask = and_(rmwMask, mask); + std::string sTy; + PTXBuilder ptxBuilderAtomicRMW; + std::string tyId = valueElemNbits * vec == 64 + ? "l" + : (valueElemNbits * vec == 32 ? "r" : "h"); + auto *dstOpr = ptxBuilderAtomicRMW.newOperand("=" + tyId); + auto *ptrOpr = ptxBuilderAtomicRMW.newAddrOperand(rmwPtr, "l"); + auto *valOpr = ptxBuilderAtomicRMW.newOperand(rmwVal, tyId); + + auto &atom = ptxBuilderAtomicRMW.create<>("atom")->global().o("gpu"); + auto rmwOp = stringifyRMWOp(atomicRmwAttr).str(); + auto sBits = std::to_string(valueElemNbits); + switch (atomicRmwAttr) { + case RMWOp::AND: + sTy = "b" + sBits; + break; + case RMWOp::OR: + sTy = "b" + sBits; + break; + case RMWOp::XOR: + sTy = "b" + sBits; + break; + case RMWOp::ADD: + sTy = "s" + sBits; + break; + case RMWOp::FADD: + rmwOp = "add"; + rmwOp += (valueElemNbits == 16 ? ".noftz" : ""); + sTy = "f" + sBits; + sTy += (vec == 2 && valueElemNbits == 16) ? "x2" : ""; + break; + case RMWOp::MAX: + sTy = "s" + sBits; + break; + case RMWOp::MIN: + sTy = "s" + sBits; + break; + case RMWOp::UMAX: + rmwOp = "max"; + sTy = "u" + sBits; + break; + case RMWOp::UMIN: + rmwOp = "min"; + sTy = "u" + sBits; + break; + case RMWOp::XCHG: + sTy = "b" + sBits; + break; + default: + return failure(); + } + atom.o(rmwOp).o(sTy); + if (valueTy) { + atom(dstOpr, ptrOpr, valOpr).predicate(rmwMask); + auto retType = vec == 1 ? valueElemTy : vecTy; + auto ret = ptxBuilderAtomicRMW.launch(rewriter, loc, retType); + for (int ii = 0; ii < vec; ++ii) { + resultVals[i + ii] = + vec == 1 ? 
ret : extract_element(valueElemTy, ret, idx_val(ii)); + } + } else { + PTXBuilder ptxBuilderMemfence; + auto memfenc = ptxBuilderMemfence.create("membar")->o("gl"); + memfenc(); + auto ASMReturnTy = void_ty(ctx); + ptxBuilderMemfence.launch(rewriter, loc, ASMReturnTy); + rmwMask = and_(rmwMask, icmp_eq(tid, i32_val(0))); + atom(dstOpr, ptrOpr, valOpr).predicate(rmwMask); + auto old = ptxBuilderAtomicRMW.launch(rewriter, loc, valueElemTy); + Value atomPtr = getSharedMemoryBase(loc, rewriter, op.getOperation()); + atomPtr = bitcast(atomPtr, ptr_ty(valueElemTy, 3)); + store(old, atomPtr); + barrier(); + Value ret = load(atomPtr); + barrier(); + rewriter.replaceOp(op, {ret}); + } + } + if (valueTy) { + Type structTy = getTypeConverter()->convertType(valueTy); + Value resultStruct = + getStructFromElements(loc, resultVals, rewriter, structTy); + rewriter.replaceOp(op, {resultStruct}); + } + return success(); + } +}; + +struct InsertSliceOpConversion + : public ConvertTritonGPUOpToLLVMPattern { + using ConvertTritonGPUOpToLLVMPattern< + tensor::InsertSliceOp>::ConvertTritonGPUOpToLLVMPattern; + + LogicalResult + matchAndRewrite(tensor::InsertSliceOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + // %dst = insert_slice %src into %dst[%offsets] + Location loc = op->getLoc(); + Value dst = op.dest(); + Value src = op.source(); + Value res = op.result(); + assert(allocation->getBufferId(res) == Allocation::InvalidBufferId && + "Only support in-place insert_slice for now"); + + auto srcTy = src.getType().dyn_cast(); + auto srcLayout = srcTy.getEncoding().dyn_cast(); + auto srcShape = srcTy.getShape(); + assert(srcLayout && "Unexpected srcLayout in InsertSliceOpConversion"); + + auto dstTy = dst.getType().dyn_cast(); + auto dstLayout = dstTy.getEncoding().dyn_cast(); + auto llDst = adaptor.dest(); + assert(dstLayout && "Unexpected dstLayout in InsertSliceOpConversion"); + assert(op.hasUnitStride() && + "Only unit stride supported by InsertSliceOpConversion"); + + // newBase = base + offset + // Triton support either static and dynamic offsets + auto smemObj = getSharedMemoryObjectFromStruct(loc, llDst, rewriter); + SmallVector offsets; + SmallVector srcStrides; + auto mixedOffsets = op.getMixedOffsets(); + for (auto i = 0; i < mixedOffsets.size(); ++i) { + if (op.isDynamicOffset(i)) { + offsets.emplace_back(adaptor.offsets()[i]); + } else { + offsets.emplace_back(i32_val(op.getStaticOffset(i))); + } + // Like insert_slice_async, we only support slice from one dimension, + // which has a slice size of 1 + if (op.getStaticSize(i) != 1) { + srcStrides.emplace_back(smemObj.strides[i]); + } + } + + // Compute the offset based on the original strides of the shared memory + // object + auto offset = dot(rewriter, loc, offsets, smemObj.strides); + auto elemTy = getTypeConverter()->convertType(dstTy.getElementType()); + auto elemPtrTy = ptr_ty(elemTy, 3); + auto smemBase = gep(elemPtrTy, smemObj.base, offset); + + auto llSrc = adaptor.source(); + auto srcIndices = + emitBaseIndexForBlockedLayout(loc, rewriter, srcLayout, srcShape); + storeBlockedToShared(src, llSrc, srcStrides, srcIndices, dst, smemBase, + elemTy, loc, rewriter); + // Barrier is not necessary. + // The membar pass knows that it writes to shared memory and will handle it + // properly. 
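+    // Note that the lowering is in place: the result simply reuses the
+    // destination's shared-memory struct (llDst), so only the inserted slice
+    // is written above and the remaining elements are left untouched.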
+ rewriter.replaceOp(op, llDst); + return success(); + } +}; + +struct InsertSliceAsyncOpConversion + : public ConvertTritonGPUOpToLLVMPattern, + public LoadStoreConversionBase { + using ConvertTritonGPUOpToLLVMPattern< + triton::gpu::InsertSliceAsyncOp>::ConvertTritonGPUOpToLLVMPattern; + + InsertSliceAsyncOpConversion(LLVMTypeConverter &converter, + const Allocation *allocation, Value smem, + AxisInfoAnalysis &axisAnalysisPass, + PatternBenefit benefit) + : ConvertTritonGPUOpToLLVMPattern( + converter, allocation, smem, benefit), + LoadStoreConversionBase(axisAnalysisPass) {} + + LogicalResult + matchAndRewrite(triton::gpu::InsertSliceAsyncOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + // insert_slice_async %src, %dst, %index, %mask, %other + auto loc = op.getLoc(); + Value src = op.src(); + Value dst = op.dst(); + Value res = op.result(); + Value mask = op.mask(); + Value other = op.other(); + assert(allocation->getBufferId(res) == Allocation::InvalidBufferId && + "Only support in-place insert_slice_async for now"); + + auto srcTy = src.getType().cast(); + auto resTy = dst.getType().cast(); + auto resElemTy = getTypeConverter()->convertType(resTy.getElementType()); + auto srcBlockedLayout = srcTy.getEncoding().cast(); + auto resSharedLayout = resTy.getEncoding().cast(); + auto srcShape = srcTy.getShape(); + assert(srcShape.size() == 2 && + "insert_slice_async: Unexpected rank of %src"); + + Value llDst = adaptor.dst(); + Value llSrc = adaptor.src(); + Value llMask = adaptor.mask(); + Value llOther = adaptor.other(); + Value llIndex = adaptor.index(); + + // %src + auto srcElems = getLLVMElems(src, llSrc, rewriter, loc); + + // %dst + auto dstTy = dst.getType().cast(); + auto dstShape = dstTy.getShape(); + auto smemObj = getSharedMemoryObjectFromStruct(loc, llDst, rewriter); + auto axis = op->getAttrOfType("axis").getInt(); + SmallVector offsetVals; + SmallVector srcStrides; + for (auto i = 0; i < dstShape.size(); ++i) { + if (i == axis) { + offsetVals.emplace_back(llIndex); + } else { + offsetVals.emplace_back(i32_val(0)); + srcStrides.emplace_back(smemObj.strides[i]); + } + } + // Compute the offset based on the original dimensions of the shared + // memory object + auto dstOffset = dot(rewriter, loc, offsetVals, smemObj.strides); + auto dstPtrTy = ptr_ty(resElemTy, 3); + Value dstPtrBase = gep(dstPtrTy, smemObj.base, dstOffset); + + // %mask + SmallVector maskElems; + if (llMask) { + maskElems = getLLVMElems(mask, llMask, rewriter, loc); + assert(srcElems.size() == maskElems.size()); + } + + // %other + SmallVector otherElems; + if (llOther) { + // FIXME(Keren): always assume other is 0 for now + // It's not necessary for now because the pipeline pass will skip + // generating insert_slice_async if the load op has any "other" tensor. 
+ // assert(false && "insert_slice_async: Other value not supported yet"); + otherElems = getLLVMElems(other, llOther, rewriter, loc); + assert(srcElems.size() == otherElems.size()); + } + + unsigned inVec = getVectorSize(src); + unsigned outVec = resSharedLayout.getVec(); + unsigned minVec = std::min(outVec, inVec); + unsigned numElems = getElemsPerThread(srcTy); + unsigned perPhase = resSharedLayout.getPerPhase(); + unsigned maxPhase = resSharedLayout.getMaxPhase(); + auto sizePerThread = srcBlockedLayout.getSizePerThread(); + auto threadsPerCTA = getThreadsPerCTA(srcBlockedLayout); + auto inOrder = srcBlockedLayout.getOrder(); + + // If perPhase * maxPhase > threadsPerCTA, we will have elements + // that share the same tile indices. The index calculation will + // be cached. + auto numSwizzleRows = std::max( + (perPhase * maxPhase) / threadsPerCTA[inOrder[1]], 1); + // A sharedLayout encoding has a "vec" parameter. + // On the column dimension, if inVec > outVec, it means we have to divide + // single vector read into multiple ones + auto numVecCols = std::max(inVec / outVec, 1); + + auto srcIndices = emitIndices(loc, rewriter, srcBlockedLayout, srcShape); + // <, TileOffset> + DenseMap, Value> tileOffsetMap; + for (unsigned elemIdx = 0; elemIdx < numElems; elemIdx += minVec) { + // minVec = 2, inVec = 4, outVec = 2 + // baseOffsetCol = 0 baseOffsetCol = 0 + // tileVecIdxCol = 0 tileVecIdxCol = 1 + // -/\- -/\- + // [|x x| |x x| x x x x x] + // [|x x| |x x| x x x x x] + // baseOffsetRow [|x x| |x x| x x x x x] + // [|x x| |x x| x x x x x] + auto vecIdx = elemIdx / minVec; + auto vecIdxCol = vecIdx % (sizePerThread[inOrder[0]] / minVec); + auto vecIdxRow = vecIdx / (sizePerThread[inOrder[0]] / minVec); + auto baseOffsetCol = + vecIdxCol / numVecCols * numVecCols * threadsPerCTA[inOrder[0]]; + auto baseOffsetRow = vecIdxRow / numSwizzleRows * numSwizzleRows * + threadsPerCTA[inOrder[1]]; + auto tileVecIdxCol = vecIdxCol % numVecCols; + auto tileVecIdxRow = vecIdxRow % numSwizzleRows; + + if (!tileOffsetMap.count({tileVecIdxRow, tileVecIdxCol})) { + // Swizzling + // Since the swizzling index is related to outVec, and we know minVec + // already, inVec doesn't matter + // + // (Numbers represent row indices) + // Example1: + // outVec = 2, inVec = 2, minVec = 2 + // outVec = 2, inVec = 4, minVec = 2 + // | [1 2] [3 4] [5 6] ... | + // | [3 4] [1 2] [7 8] ... | + // | [5 6] [7 8] [1 2] ... | + // Example2: + // outVec = 4, inVec = 2, minVec = 2 + // | [1 2 3 4] [5 6 7 8] [9 10 11 12] ... | + // | [5 6 7 8] [1 2 3 4] [13 14 15 16] ... | + // | [9 10 11 12] [13 14 15 16] [1 2 3 4] ... | + auto srcIdx = srcIndices[tileVecIdxRow * sizePerThread[inOrder[0]]]; + Value phase = urem(udiv(srcIdx[inOrder[1]], i32_val(perPhase)), + i32_val(maxPhase)); + // srcShape and smemObj.shape maybe different if smemObj is a + // slice of the original shared memory object. 
+ // So we need to use the original shape to compute the offset + Value rowOffset = mul(srcIdx[inOrder[1]], srcStrides[inOrder[1]]); + Value colOffset = + add(srcIdx[inOrder[0]], i32_val(tileVecIdxCol * minVec)); + Value swizzleIdx = udiv(colOffset, i32_val(outVec)); + Value swizzleColOffset = + add(mul(xor_(swizzleIdx, phase), i32_val(outVec)), + urem(colOffset, i32_val(outVec))); + Value tileOffset = add(rowOffset, swizzleColOffset); + tileOffsetMap[{tileVecIdxRow, tileVecIdxCol}] = + gep(dstPtrTy, dstPtrBase, tileOffset); + } + + // 16 * 8 = 128bits + auto maxBitWidth = + std::max(128, resElemTy.getIntOrFloatBitWidth()); + auto vecBitWidth = resElemTy.getIntOrFloatBitWidth() * minVec; + auto bitWidth = std::min(maxBitWidth, vecBitWidth); + auto numWords = vecBitWidth / bitWidth; + auto numWordElems = bitWidth / resElemTy.getIntOrFloatBitWidth(); + + // Tune CG and CA here. + auto byteWidth = bitWidth / 8; + CacheModifier srcCacheModifier = + byteWidth == 16 ? CacheModifier::CG : CacheModifier::CA; + assert(byteWidth == 16 || byteWidth == 8 || byteWidth == 4); + auto resByteWidth = resElemTy.getIntOrFloatBitWidth() / 8; + + Value tileOffset = tileOffsetMap[{tileVecIdxRow, tileVecIdxCol}]; + Value baseOffset = + add(mul(i32_val(baseOffsetRow), srcStrides[inOrder[1]]), + i32_val(baseOffsetCol)); + Value basePtr = gep(dstPtrTy, tileOffset, baseOffset); + for (size_t wordIdx = 0; wordIdx < numWords; ++wordIdx) { + PTXBuilder ptxBuilder; + auto wordElemIdx = wordIdx * numWordElems; + auto ©AsyncOp = + *ptxBuilder.create(srcCacheModifier); + auto *dstOperand = + ptxBuilder.newAddrOperand(basePtr, "r", wordElemIdx * resByteWidth); + auto *srcOperand = + ptxBuilder.newAddrOperand(srcElems[elemIdx + wordElemIdx], "l"); + auto *copySize = ptxBuilder.newConstantOperand(byteWidth); + auto *srcSize = copySize; + if (op.mask()) { + // We don't use predicate in this case, setting src-size to 0 + // if there's any mask. cp.async will automatically fill the + // remaining slots with 0 if cp-size > src-size. + // XXX(Keren): Always assume other = 0 for now. 
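+          // Sketch of the intended PTX for byteWidth == 16 (assuming the
+          // `other` value is 0, as noted above):
+          //   cp.async.cg.shared.global [ dst ], [ src ], 16, %srcSize;
+          // where %srcSize is byteWidth for active elements and 0 for
+          // masked-off ones, so cp.async zero-fills the skipped bytes.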
+ auto selectOp = select(maskElems[elemIdx + wordElemIdx], + i32_val(byteWidth), i32_val(0)); + srcSize = ptxBuilder.newOperand(selectOp, "r"); + } + copyAsyncOp(dstOperand, srcOperand, copySize, srcSize); + ptxBuilder.launch(rewriter, loc, void_ty(getContext())); + } + } + + PTXBuilder ptxBuilder; + ptxBuilder.create<>("cp.async.commit_group")->operator()(); + ptxBuilder.launch(rewriter, loc, void_ty(getContext())); + rewriter.replaceOp(op, llDst); + return success(); + } +}; + +void populateLoadStoreOpToLLVMPatterns(mlir::LLVMTypeConverter &typeConverter, + RewritePatternSet &patterns, + int numWarps, + AxisInfoAnalysis &axisInfoAnalysis, + const Allocation *allocation, Value smem, + PatternBenefit benefit) { + patterns.add(typeConverter, axisInfoAnalysis, benefit); + patterns.add(typeConverter, axisInfoAnalysis, benefit); + patterns.add(typeConverter, allocation, smem, + axisInfoAnalysis, benefit); + patterns.add(typeConverter, allocation, smem, + axisInfoAnalysis, benefit); + patterns.add(typeConverter, allocation, smem, + benefit); + patterns.add(typeConverter, allocation, smem, + axisInfoAnalysis, benefit); +} diff --git a/lib/Conversion/TritonGPUToLLVM/LoadStoreOpToLLVM.h b/lib/Conversion/TritonGPUToLLVM/LoadStoreOpToLLVM.h new file mode 100644 index 000000000000..96c2f1afd20a --- /dev/null +++ b/lib/Conversion/TritonGPUToLLVM/LoadStoreOpToLLVM.h @@ -0,0 +1,16 @@ +#ifndef TRITON_CONVERSION_TRITONGPU_TO_LLVM_LOAD_STORE_OP_H +#define TRITON_CONVERSION_TRITONGPU_TO_LLVM_LOAD_STORE_OP_H + +#include "TritonGPUToLLVMBase.h" + +using namespace mlir; +using namespace mlir::triton; + +void populateLoadStoreOpToLLVMPatterns(mlir::LLVMTypeConverter &typeConverter, + RewritePatternSet &patterns, + int numWarps, + AxisInfoAnalysis &axisInfoAnalysis, + const Allocation *allocation, Value smem, + PatternBenefit benefit); + +#endif diff --git a/lib/Conversion/TritonGPUToLLVM/PTXAsmFormat.cpp b/lib/Conversion/TritonGPUToLLVM/PTXAsmFormat.cpp new file mode 100644 index 000000000000..ed0de6e1200f --- /dev/null +++ b/lib/Conversion/TritonGPUToLLVM/PTXAsmFormat.cpp @@ -0,0 +1,217 @@ +#include "triton/Conversion/TritonGPUToLLVM/PTXAsmFormat.h" + +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "mlir/Transforms/DialectConversion.h" +#include "llvm/Support/raw_ostream.h" +// TODO(Superjomn): unify to llvm::raw_string_ostream +#include + +namespace mlir { +namespace triton { + +// TODO(Superjomn) Move to a global utility file? 
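+// Example: strJoin({"ld", "global", "v4", "b32"}, ".") returns
+// "ld.global.v4.b32"; PTXInstrExecution::dump() below uses this to assemble
+// the keywords collected through o()/v()/b() into the final opcode.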
+std::string strJoin(llvm::ArrayRef strs, + llvm::StringRef delimiter) { + std::string osStr; + llvm::raw_string_ostream os(osStr); + for (size_t i = 0; !strs.empty() && i < strs.size() - 1; ++i) + os << strs[i] << delimiter; + if (!strs.empty()) + os << strs.back(); + os.flush(); + return osStr; +} + +PTXInstr::Operand * +PTXBuilder::newOperand(mlir::Value value, StringRef constraint, + std::function formatter) { + argArchive.emplace_back(std::make_unique(value, constraint)); + auto *opr = argArchive.back().get(); + opr->repr = formatter; + opr->idx = oprCounter++; + return opr; +} + +PTXBuilder::Operand *PTXBuilder::newOperand(StringRef constraint) { + // Constraint should be something like "=r" + assert(!constraint.empty() && constraint[0] == '='); + auto *opr = newOperand(); + opr->idx = oprCounter++; + opr->constraint = constraint; + return opr; +} + +PTXBuilder::Operand *PTXBuilder::newConstantOperand(const std::string &v) { + argArchive.emplace_back(std::make_unique()); + argArchive.back()->repr = [v](int idx) { return v; }; + return argArchive.back().get(); +} + +PTXBuilder::Operand *PTXBuilder::newConstantOperand(int64_t v) { + std::stringstream ss; + ss << "0x" << std::hex << v; + return newConstantOperand(ss.str()); +} + +std::string PTXBuilder::getConstraints() const { + auto args = getAllArgs(); + llvm::SmallVector argReprs; + for (auto arg : args) + argReprs.push_back(arg->constraint); + return strJoin(argReprs, ","); +} + +llvm::SmallVector PTXBuilder::getAllMLIRArgs() const { + llvm::SmallVector res; + for (auto &arg : argArchive) { + if (!arg->isList() && arg->value) + res.push_back(arg->value); + } + return res; +} + +SmallVector PTXBuilder::getAllArgs() const { + llvm::SmallVector res; + for (auto &x : argArchive) + if (!x->isList()) + res.push_back(x.get()); + return res; +} + +mlir::Value PTXBuilder::launch(ConversionPatternRewriter &rewriter, + Location loc, Type resTy, bool hasSideEffect, + bool isAlignStack, + ArrayRef attrs) const { + auto *ctx = rewriter.getContext(); + auto inlineAsm = rewriter.create( + loc, resTy, getAllMLIRArgs(), // operands + dump(), // asm_string + getConstraints(), // constraints + hasSideEffect, // has_side_effects + isAlignStack, // is_align_stack + LLVM::AsmDialectAttr::get(ctx, + LLVM::AsmDialect::AD_ATT), // asm_dialect + ArrayAttr::get(ctx, attrs) // operand_attrs + ); + + return inlineAsm.getRes(); +} + +std::string PTXInstr::Operand::dump() const { + if (repr) + return repr(idx); + if (!isList()) + return "$" + std::to_string(idx); + + llvm::SmallVector oprs; + for (auto *opr : list) + oprs.push_back(opr->dump()); + return "{ " + strJoin(oprs, ", ") + " }"; +} + +PTXInstr::Operand *PTXBuilder::newAddrOperand(mlir::Value addr, + StringRef constraint, int off) { + auto *opr = newOperand(addr, constraint); + opr->repr = [off](int idx) -> std::string { + std::stringstream ss; + ss << "[ $" << idx << " + " << off << " ]"; + return ss.str(); + }; + + return opr; +} + +std::string PTXBuilder::dump() const { + llvm::SmallVector lines; + for (auto &exec : executions) { + lines.push_back(exec->dump()); + } + + return strJoin(lines, "\n\t"); +} + +PTXInstrExecution &PTXInstrCommon::call(ArrayRef oprs, + bool onlyAttachMLIRArgs) { + if (onlyAttachMLIRArgs) { + // Nearly impossible to make the $0,$1 in two PTX code snippets to point to + // the same MLIR values in onlyAttachMLIRArgs mode. 
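+    // For example, the bf16 FMul/FAdd/FSub conversions pass a complete asm
+    // string that already references $0/$1/$2 and call this with
+    // onlyAttachMLIRArgs = true, so the operands are only bound to MLIR
+    // values and no operand text is appended after the opcode.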
+ assert(builder->executions.empty() && + "builder can only hold a single execution when onlyAttachMIIRArgs " + "is true."); + builder->reorderArgArchive(oprs); + } + + builder->executions.emplace_back( + std::make_unique(this, oprs, onlyAttachMLIRArgs)); + + return *builder->executions.back(); +} + +PTXInstrExecution &PTXInstrCommon::operator()(ArrayRef oprs, + bool onlyAttachMLIRArgs) { + return call(oprs, onlyAttachMLIRArgs); +} + +std::string PTXInstrExecution::dump() const { + std::string osStr; + llvm::raw_string_ostream os(osStr); + + std::string instrRepr = strJoin(instr->instrParts, "."); + if (onlyAttachMLIRArgs) + return instrRepr; + + if (pred) { + if (!pred->repr) + os << "@" << pred->dump() << " "; + else + os << pred->repr(pred->idx) << " "; + } + + llvm::SmallVector argReprs; + for (auto *arg : argsInOrder) { + argReprs.push_back(arg->dump()); + } + + std::string argsRepr = strJoin(argReprs, ", "); + + os << instrRepr << " " << argsRepr << ";"; + os.flush(); + return osStr; +} + +SmallVector +PTXInstrExecution::getArgList() const { + SmallVector args; + for (auto *arg : argsInOrder) { + if (arg->isList()) + args.insert(args.end(), arg->list.begin(), arg->list.end()); + else + args.push_back(arg); + } + return args; +} + +PTXInstr &PTXInstr::global() { + o("global"); + return *this; +} + +PTXInstr &PTXInstr::shared() { + o("shared"); + return *this; +} + +PTXInstr &PTXInstr::v(int vecWidth, bool predicate) { + if (vecWidth > 1) { + o("v" + std::to_string(vecWidth), predicate); + } + return *this; +} + +PTXInstr &PTXInstr::b(int width) { + o("b" + std::to_string(width)); + return *this; +} + +} // namespace triton +} // namespace mlir diff --git a/lib/Conversion/TritonGPUToLLVM/ReduceOpToLLVM.cpp b/lib/Conversion/TritonGPUToLLVM/ReduceOpToLLVM.cpp new file mode 100644 index 000000000000..5f055fa6fa24 --- /dev/null +++ b/lib/Conversion/TritonGPUToLLVM/ReduceOpToLLVM.cpp @@ -0,0 +1,488 @@ +#include "ReduceOpToLLVM.h" + +using namespace mlir; +using namespace mlir::triton; + +using ::mlir::LLVM::getElementsFromStruct; +using ::mlir::LLVM::getStructFromElements; +using ::mlir::LLVM::shflSync; +using ::mlir::LLVM::storeShared; +using ::mlir::triton::gpu::getElemsPerThread; +using ::mlir::triton::gpu::getOrder; + +struct ReduceOpConversion + : public ConvertTritonGPUOpToLLVMPattern { +public: + using ConvertTritonGPUOpToLLVMPattern< + triton::ReduceOp>::ConvertTritonGPUOpToLLVMPattern; + + LogicalResult + matchAndRewrite(triton::ReduceOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + if (ReduceOpHelper(op).isFastReduction()) + return matchAndRewriteFast(op, adaptor, rewriter); + return matchAndRewriteBasic(op, adaptor, rewriter); + } + +private: + void accumulate(ConversionPatternRewriter &rewriter, Location loc, + RedOp redOp, Value &acc, Value cur, bool isFirst) const { + if (isFirst) { + acc = cur; + return; + } + switch (redOp) { + case RedOp::ADD: + acc = add(acc, cur); + break; + case RedOp::FADD: + acc = fadd(acc.getType(), acc, cur); + break; + case RedOp::MIN: + acc = smin(acc, cur); + break; + case RedOp::MAX: + acc = smax(acc, cur); + break; + case RedOp::UMIN: + acc = umin(acc, cur); + break; + case RedOp::UMAX: + acc = umax(acc, cur); + break; + case RedOp::FMIN: + acc = fmin(acc, cur); + break; + case RedOp::FMAX: + acc = fmax(acc, cur); + break; + case RedOp::XOR: + acc = xor_(acc, cur); + break; + case RedOp::ARGMIN: + case RedOp::ARGMAX: + case RedOp::ARGUMIN: + case RedOp::ARGUMAX: + case RedOp::ARGFMIN: + case RedOp::ARGFMAX: + 
llvm::report_fatal_error( + "This accumulate implementation is not for argmin / argmax"); + default: + llvm::report_fatal_error("Unsupported reduce op"); + } + } + + void accumulateWithIndex(ConversionPatternRewriter &rewriter, Location loc, + RedOp redOp, Value &acc, Value &accIndex, Value cur, + Value curIndex, bool isFirst) const { + if (isFirst) { + acc = cur; + accIndex = curIndex; + return; + } + switch (redOp) { + case RedOp::ARGMIN: + accIndex = select( + icmp_slt(acc, cur), accIndex, + select(icmp_sgt(acc, cur), curIndex, smin(accIndex, curIndex))); + acc = smin(acc, cur); + break; + case RedOp::ARGMAX: + accIndex = select( + icmp_sgt(acc, cur), accIndex, + select(icmp_slt(acc, cur), curIndex, smin(accIndex, curIndex))); + acc = smax(acc, cur); + break; + case RedOp::ARGUMIN: + accIndex = select( + icmp_ult(acc, cur), accIndex, + select(icmp_ugt(acc, cur), curIndex, smin(accIndex, curIndex))); + acc = umin(acc, cur); + break; + case RedOp::ARGUMAX: + accIndex = select( + icmp_ugt(acc, cur), accIndex, + select(icmp_ult(acc, cur), curIndex, smin(accIndex, curIndex))); + acc = umax(acc, cur); + break; + case RedOp::ARGFMIN: + accIndex = select( + fcmp_olt(acc, cur), accIndex, + select(fcmp_ogt(acc, cur), curIndex, smin(accIndex, curIndex))); + acc = fmin(acc, cur); + break; + case RedOp::ARGFMAX: + accIndex = select( + fcmp_ogt(acc, cur), accIndex, + select(fcmp_olt(acc, cur), curIndex, smin(accIndex, curIndex))); + acc = fmax(acc, cur); + break; + case RedOp::ADD: + case RedOp::FADD: + case RedOp::MIN: + case RedOp::MAX: + case RedOp::UMIN: + case RedOp::UMAX: + case RedOp::FMIN: + case RedOp::FMAX: + case RedOp::XOR: + llvm::report_fatal_error( + "This accumulate implementation is only for argmin / argmax"); + default: + llvm::report_fatal_error("Unsupported reduce op"); + } + } + + // Use shared memory for reduction within warps and across warps + LogicalResult + matchAndRewriteBasic(triton::ReduceOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const { + Location loc = op->getLoc(); + unsigned axis = op.axis(); + bool withIndex = triton::ReduceOp::withIndex(op.redOp()); + + auto srcTy = op.operand().getType().cast(); + auto srcLayout = srcTy.getEncoding().cast(); + auto srcOrd = srcLayout.getOrder(); + auto srcShape = srcTy.getShape(); + + auto llvmElemTy = getTypeConverter()->convertType(srcTy.getElementType()); + auto llvmIndexTy = getTypeConverter()->getIndexType(); + auto elemPtrTy = LLVM::LLVMPointerType::get(llvmElemTy, 3); + auto indexPtrTy = LLVM::LLVMPointerType::get(llvmIndexTy, 3); + Value smemBase = getSharedMemoryBase(loc, rewriter, op.getOperation()); + smemBase = bitcast(smemBase, elemPtrTy); + + ReduceOpHelper helper(op); + auto smemShape = helper.getScratchConfigBasic(); + unsigned elems = product(smemShape); + Value indexSmemBase = gep(elemPtrTy, smemBase, i32_val(elems)); + indexSmemBase = bitcast(indexSmemBase, indexPtrTy); + + unsigned srcElems = getElemsPerThread(srcTy); + auto srcIndices = emitIndices(loc, rewriter, srcLayout, srcShape); + auto srcValues = getElementsFromStruct(loc, adaptor.operand(), rewriter); + + SmallVector> offset = + emitOffsetForBlockedLayout(srcLayout, srcShape); + + std::map, Value> accs; + std::map, Value> accIndices; + std::map, SmallVector> indices; + + // reduce within threads + for (unsigned i = 0; i < srcElems; ++i) { + SmallVector key = offset[i]; + key[axis] = 0; + bool isFirst = accs.find(key) == accs.end(); + if (!withIndex) { + accumulate(rewriter, loc, op.redOp(), accs[key], srcValues[i], isFirst); + } 
else { + Value curIndex = srcIndices[i][axis]; + accumulateWithIndex(rewriter, loc, op.redOp(), accs[key], + accIndices[key], srcValues[i], curIndex, isFirst); + } + if (isFirst) + indices[key] = srcIndices[i]; + } + + // cached int32 constants + std::map ints; + ints[0] = i32_val(0); + for (int N = smemShape[axis] / 2; N > 0; N >>= 1) + ints[N] = i32_val(N); + Value sizePerThread = i32_val(srcLayout.getSizePerThread()[axis]); + + // reduce across threads + for (auto it : accs) { + const SmallVector &key = it.first; + Value acc = it.second; + Value accIndex; + if (withIndex) + accIndex = accIndices[key]; + SmallVector writeIdx = indices[key]; + + writeIdx[axis] = udiv(writeIdx[axis], sizePerThread); + Value writeOffset = linearize(rewriter, loc, writeIdx, smemShape, srcOrd); + Value writePtr = gep(elemPtrTy, smemBase, writeOffset); + Value indexWritePtr = gep(indexPtrTy, indexSmemBase, writeOffset); + store(acc, writePtr); + if (withIndex) + store(accIndex, indexWritePtr); + + SmallVector readIdx(writeIdx.size(), ints[0]); + for (int N = smemShape[axis] / 2; N > 0; N >>= 1) { + readIdx[axis] = ints[N]; + Value readMask = icmp_slt(writeIdx[axis], ints[N]); + Value readOffset = select( + readMask, linearize(rewriter, loc, readIdx, smemShape, srcOrd), + ints[0]); + Value readPtr = gep(elemPtrTy, writePtr, readOffset); + barrier(); + if (!withIndex) { + Value cur = load(readPtr); + accumulate(rewriter, loc, op.redOp(), acc, cur, false); + barrier(); + store(acc, writePtr); + } else { + Value cur = load(readPtr); + Value indexReadPtr = gep(indexPtrTy, indexWritePtr, readOffset); + Value curIndex = load(indexReadPtr); + accumulateWithIndex(rewriter, loc, op.redOp(), acc, accIndex, cur, + curIndex, false); + barrier(); + store(acc, writePtr); + store(accIndex, indexWritePtr); + } + } + } + + barrier(); + + // set output values + if (auto resultTy = op.getType().dyn_cast()) { + // nd-tensor where n >= 1 + auto resultLayout = resultTy.getEncoding(); + auto resultShape = resultTy.getShape(); + + unsigned resultElems = getElemsPerThread(resultTy); + auto resultIndices = + emitIndices(loc, rewriter, resultLayout, resultShape); + assert(resultIndices.size() == resultElems); + + SmallVector resultVals(resultElems); + for (unsigned i = 0; i < resultElems; ++i) { + SmallVector readIdx = resultIndices[i]; + readIdx.insert(readIdx.begin() + axis, ints[0]); + Value readOffset = linearize(rewriter, loc, readIdx, smemShape, srcOrd); + Value readPtr = gep(elemPtrTy, smemBase, readOffset); + Value indexReadPtr = gep(indexPtrTy, indexSmemBase, readOffset); + resultVals[i] = withIndex ? load(indexReadPtr) : load(readPtr); + } + + SmallVector resultTypes(resultElems, + withIndex ? llvmIndexTy : llvmElemTy); + Type structTy = + LLVM::LLVMStructType::getLiteral(this->getContext(), resultTypes); + Value ret = getStructFromElements(loc, resultVals, rewriter, structTy); + rewriter.replaceOp(op, ret); + } else { + // 0d-tensor -> scalar + Value resultVal = withIndex ? 
load(indexSmemBase) : load(smemBase); + rewriter.replaceOp(op, resultVal); + } + + return success(); + } + + // Use warp shuffle for reduction within warps and shared memory for data + // exchange across warps + LogicalResult matchAndRewriteFast(triton::ReduceOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const { + Location loc = op->getLoc(); + unsigned axis = adaptor.axis(); + bool withIndex = triton::ReduceOp::withIndex(op.redOp()); + + auto srcTy = op.operand().getType().cast(); + auto srcLayout = srcTy.getEncoding(); + auto srcShape = srcTy.getShape(); + auto srcRank = srcTy.getRank(); + auto order = getOrder(srcLayout); + + auto threadsPerWarp = triton::gpu::getThreadsPerWarp(srcLayout); + auto warpsPerCTA = triton::gpu::getWarpsPerCTA(srcLayout); + + auto llvmElemTy = getTypeConverter()->convertType(srcTy.getElementType()); + auto llvmIndexTy = getTypeConverter()->getIndexType(); + auto elemPtrTy = LLVM::LLVMPointerType::get(llvmElemTy, 3); + auto indexPtrTy = LLVM::LLVMPointerType::get(llvmIndexTy, 3); + Value smemBase = getSharedMemoryBase(loc, rewriter, op.getOperation()); + smemBase = bitcast(smemBase, elemPtrTy); + + ReduceOpHelper helper(op); + auto smemShapes = helper.getScratchConfigsFast(); + unsigned elems = product(smemShapes[0]); + unsigned maxElems = std::max(elems, product(smemShapes[1])); + Value indexSmemBase = gep(elemPtrTy, smemBase, i32_val(maxElems)); + indexSmemBase = bitcast(indexSmemBase, indexPtrTy); + + unsigned sizeIntraWarps = helper.getIntraWarpSize(); + unsigned sizeInterWarps = helper.getInterWarpSize(); + + unsigned srcElems = getElemsPerThread(srcTy); + auto srcIndices = emitIndices(loc, rewriter, srcLayout, srcShape); + auto srcValues = getElementsFromStruct(loc, adaptor.operand(), rewriter); + + SmallVector> offset = + emitOffsetForLayout(srcLayout, srcShape); + + std::map, Value> accs; + std::map, Value> accIndices; + std::map, SmallVector> indices; + + // reduce within threads + for (unsigned i = 0; i < srcElems; ++i) { + SmallVector key = offset[i]; + key[axis] = 0; + bool isFirst = accs.find(key) == accs.end(); + if (!withIndex) { + accumulate(rewriter, loc, op.redOp(), accs[key], srcValues[i], isFirst); + } else { + Value curIndex = srcIndices[i][axis]; + accumulateWithIndex(rewriter, loc, op.redOp(), accs[key], + accIndices[key], srcValues[i], curIndex, isFirst); + } + if (isFirst) + indices[key] = srcIndices[i]; + } + + Value threadId = getThreadId(rewriter, loc); + Value warpSize = i32_val(32); + Value warpId = udiv(threadId, warpSize); + Value laneId = urem(threadId, warpSize); + + SmallVector multiDimLaneId = + delinearize(rewriter, loc, laneId, threadsPerWarp, order); + SmallVector multiDimWarpId = + delinearize(rewriter, loc, warpId, warpsPerCTA, order); + + Value laneIdAxis = multiDimLaneId[axis]; + Value warpIdAxis = multiDimWarpId[axis]; + + Value zero = i32_val(0); + Value laneZero = icmp_eq(laneIdAxis, zero); + Value warpZero = icmp_eq(warpIdAxis, zero); + + for (auto it : accs) { + const SmallVector &key = it.first; + Value acc = it.second; + Value accIndex; + if (withIndex) + accIndex = accIndices[key]; + + // Reduce within warps + for (unsigned N = sizeIntraWarps / 2; N > 0; N >>= 1) { + Value shfl = shflSync(loc, rewriter, acc, N); + if (!withIndex) { + accumulate(rewriter, loc, op.redOp(), acc, shfl, false); + } else { + Value shflIndex = shflSync(loc, rewriter, accIndex, N); + accumulateWithIndex(rewriter, loc, op.redOp(), acc, accIndex, shfl, + shflIndex, false); + } + } + + SmallVector writeIdx = 
indices[key]; + writeIdx[axis] = (sizeInterWarps == 1) ? zero : warpIdAxis; + Value writeOffset = + linearize(rewriter, loc, writeIdx, smemShapes[0], order); + Value writePtr = gep(elemPtrTy, smemBase, writeOffset); + storeShared(rewriter, loc, writePtr, acc, laneZero); + if (withIndex) { + Value indexWritePtr = gep(indexPtrTy, indexSmemBase, writeOffset); + storeShared(rewriter, loc, indexWritePtr, accIndex, laneZero); + } + } + + barrier(); + + // The second round of shuffle reduction + // now the problem size: sizeInterWarps, s1, s2, .. , sn + // where sizeInterWarps is 2^m + // + // Each thread needs to process: + // elemsPerThread = sizeInterWarps * s1 * s2 .. Sn / numThreads + unsigned numThreads = + product(triton::gpu::getWarpsPerCTA(srcLayout)) * 32; + unsigned elemsPerThread = std::max(elems / numThreads, 1); + Value readOffset = threadId; + for (unsigned round = 0; round < elemsPerThread; ++round) { + Value readPtr = gep(elemPtrTy, smemBase, readOffset); + // FIXME(Qingyi): need predicate icmp_slt(threadId, + // i32_val(sizeInerWarps)) + Value acc = load(readPtr); + Value accIndex; + if (withIndex) { + Value readIndexPtr = gep(indexPtrTy, indexSmemBase, readOffset); + accIndex = load(readIndexPtr); + } + + for (unsigned N = sizeInterWarps / 2; N > 0; N >>= 1) { + Value shfl = shflSync(loc, rewriter, acc, N); + if (!withIndex) { + accumulate(rewriter, loc, op.redOp(), acc, shfl, false); + } else { + Value shflIndex = shflSync(loc, rewriter, accIndex, N); + accumulateWithIndex(rewriter, loc, op.redOp(), acc, accIndex, shfl, + shflIndex, false); + } + } + + // only the first thread in each sizeInterWarps is writing + Value writeOffset = readOffset; + Value writePtr = gep(elemPtrTy, smemBase, writeOffset); + Value threadIsNeeded = icmp_slt(threadId, i32_val(elems)); + Value laneIdModSizeInterWarps = urem(laneId, i32_val(sizeInterWarps)); + Value laneIdModSizeInterWarpsIsZero = + icmp_eq(laneIdModSizeInterWarps, zero); + Value pred = and_(threadIsNeeded, laneIdModSizeInterWarpsIsZero); + storeShared(rewriter, loc, writePtr, acc, pred); + if (withIndex) { + Value writeIndexPtr = gep(indexPtrTy, indexSmemBase, writeOffset); + storeShared(rewriter, loc, writeIndexPtr, accIndex, pred); + } + + if (round != elemsPerThread - 1) { + readOffset = add(readOffset, i32_val(numThreads)); + } + } + + // We could avoid this barrier in some of the layouts, however this is not + // the general case. + // TODO: optimize the barrier incase the layouts are accepted. + barrier(); + + // set output values + if (auto resultTy = op.getType().dyn_cast()) { + // nd-tensor where n >= 1 + auto resultLayout = resultTy.getEncoding().cast(); + auto resultShape = resultTy.getShape(); + unsigned resultElems = getElemsPerThread(resultTy); + auto resultIndices = + emitIndices(loc, rewriter, resultLayout, resultShape); + assert(resultIndices.size() == resultElems); + + SmallVector resultVals(resultElems); + for (size_t i = 0; i < resultElems; ++i) { + SmallVector readIdx = resultIndices[i]; + readIdx.insert(readIdx.begin() + axis, i32_val(0)); + Value readOffset = + linearize(rewriter, loc, readIdx, smemShapes[0], order); + Value readPtr = gep(elemPtrTy, smemBase, readOffset); + Value indexReadPtr = gep(indexPtrTy, indexSmemBase, readOffset); + resultVals[i] = withIndex ? load(indexReadPtr) : load(readPtr); + } + + SmallVector resultTypes(resultElems, + withIndex ? 
llvmIndexTy : llvmElemTy); + Type structTy = + LLVM::LLVMStructType::getLiteral(this->getContext(), resultTypes); + Value ret = getStructFromElements(loc, resultVals, rewriter, structTy); + rewriter.replaceOp(op, ret); + } else { + // 0d-tensor -> scalar + Value resultVal = withIndex ? load(indexSmemBase) : load(smemBase); + rewriter.replaceOp(op, resultVal); + } + + return success(); + } +}; + +void populateReduceOpToLLVMPatterns(mlir::LLVMTypeConverter &typeConverter, + RewritePatternSet &patterns, int numWarps, + AxisInfoAnalysis &axisInfoAnalysis, + const Allocation *allocation, Value smem, + PatternBenefit benefit) { + patterns.add(typeConverter, allocation, smem, benefit); +} diff --git a/lib/Conversion/TritonGPUToLLVM/ReduceOpToLLVM.h b/lib/Conversion/TritonGPUToLLVM/ReduceOpToLLVM.h new file mode 100644 index 000000000000..fc4c5145c702 --- /dev/null +++ b/lib/Conversion/TritonGPUToLLVM/ReduceOpToLLVM.h @@ -0,0 +1,15 @@ +#ifndef TRITON_CONVERSION_TRITONGPU_TO_LLVM_REDUCE_OP_H +#define TRITON_CONVERSION_TRITONGPU_TO_LLVM_REDUCE_OP_H + +#include "TritonGPUToLLVMBase.h" + +using namespace mlir; +using namespace mlir::triton; + +void populateReduceOpToLLVMPatterns(mlir::LLVMTypeConverter &typeConverter, + RewritePatternSet &patterns, int numWarps, + AxisInfoAnalysis &axisInfoAnalysis, + const Allocation *allocation, Value smem, + PatternBenefit benefit); + +#endif \ No newline at end of file diff --git a/lib/Conversion/TritonGPUToLLVM/TritonGPUToLLVM.cpp b/lib/Conversion/TritonGPUToLLVM/TritonGPUToLLVM.cpp new file mode 100644 index 000000000000..3fcb83d95844 --- /dev/null +++ b/lib/Conversion/TritonGPUToLLVM/TritonGPUToLLVM.cpp @@ -0,0 +1,521 @@ +#include "TritonGPUToLLVM.h" +#include "DotOpHelpers.h" +#include "Utility.h" + +using namespace mlir; +using namespace mlir::triton; + +using ::mlir::LLVM::getElementsFromStruct; +using ::mlir::LLVM::getSharedMemoryObjectFromStruct; +using ::mlir::LLVM::getStructFromElements; +using ::mlir::triton::gpu::getElemsPerThread; +using ::mlir::triton::gpu::SharedEncodingAttr; + +struct ReturnOpConversion : public ConvertOpToLLVMPattern<::mlir::ReturnOp> { + using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern; + + LogicalResult + matchAndRewrite(ReturnOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + unsigned numArguments = op.getNumOperands(); + + // Currently, Triton kernel function always return nothing. + // TODO(Superjomn) add support for non-inline device function + if (numArguments > 0) { + return rewriter.notifyMatchFailure( + op, "Only kernel function with nothing returned is supported."); + } + + rewriter.replaceOpWithNewOp(op, TypeRange(), ValueRange(), + op->getAttrs()); + return success(); + } +}; + +struct BroadcastOpConversion + : public ConvertTritonGPUOpToLLVMPattern { + using ConvertTritonGPUOpToLLVMPattern< + triton::BroadcastOp>::ConvertTritonGPUOpToLLVMPattern; + + LogicalResult + matchAndRewrite(triton::BroadcastOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + // Following the order of indices in the legacy code, a broadcast of: + // [s(0), s(1) ... s(k-1), 1, s(k+1), s(k+2) ... s(n-1)] + // => + // [s(0), s(1) ... s(k-1), s(k), s(k+1), s(k+2) ... 
s(n-1)] + // + // logically maps to a broadcast within a thread's scope: + // [cta(0)..cta(k-1), 1,cta(k+1)..cta(n-1),spt(0)..spt(k-1), + // 1,spt(k+1)..spt(n-1)] + // => + // [cta(0)..cta(k-1),cta(k),cta(k+1)..cta(n-1),spt(0)..spt(k-1),spt(k),spt(k+1)..spt(n-1)] + // + // regardless of the order of the layout + // + Location loc = op->getLoc(); + Value src = adaptor.src(); + Value result = op.result(); + auto srcTy = op.src().getType().cast(); + auto resultTy = result.getType().cast(); + auto srcLayout = srcTy.getEncoding(); + auto resultLayout = resultTy.getEncoding(); + auto srcShape = srcTy.getShape(); + auto resultShape = resultTy.getShape(); + unsigned rank = srcTy.getRank(); + assert(rank == resultTy.getRank()); + auto order = triton::gpu::getOrder(srcLayout); + auto srcOffsets = emitOffsetForLayout(srcLayout, srcShape); + auto resultOffsets = emitOffsetForLayout(resultLayout, resultShape); + SmallVector srcVals = getElementsFromStruct(loc, src, rewriter); + DenseMap, Value, SmallVectorKeyInfo> srcValues; + for (size_t i = 0; i < srcOffsets.size(); i++) { + srcValues[srcOffsets[i]] = srcVals[i]; + } + SmallVector resultVals; + for (size_t i = 0; i < resultOffsets.size(); i++) { + auto offset = resultOffsets[i]; + for (size_t j = 0; j < srcShape.size(); j++) + if (srcShape[j] == 1) + offset[j] = 0; + resultVals.push_back(srcValues.lookup(offset)); + } + auto llvmStructTy = getTypeConverter()->convertType(resultTy); + Value resultStruct = + getStructFromElements(loc, resultVals, rewriter, llvmStructTy); + rewriter.replaceOp(op, {resultStruct}); + return success(); + } +}; + +struct PrintfOpConversion + : public ConvertTritonGPUOpToLLVMPattern { + using ConvertTritonGPUOpToLLVMPattern< + triton::PrintfOp>::ConvertTritonGPUOpToLLVMPattern; + + LogicalResult + matchAndRewrite(triton::PrintfOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + auto loc = op->getLoc(); + SmallVector operands; + for (auto operand : adaptor.getOperands()) { + auto sub_operands = getElementsFromStruct(loc, operand, rewriter); + for (auto elem : sub_operands) { + operands.push_back(elem); + } + } + std::string formatStr; + llvm::raw_string_ostream os(formatStr); + os << op.prefix(); + if (!operands.empty()) { + os << getFormatSubstr(operands[0]); + } + + for (size_t i = 1; i < operands.size(); ++i) { + os << ", " << getFormatSubstr(operands[i]); + } + llPrintf(formatStr, operands, rewriter); + rewriter.eraseOp(op); + return success(); + } + + std::string getFormatSubstr(Value value) const { + Type type = value.getType(); + if (type.isa()) { + return "%p"; + } else if (type.isBF16() || type.isF16() || type.isF32() || type.isF64()) { + return "%f"; + } else if (type.isSignedInteger()) { + return "%i"; + } else if (type.isUnsignedInteger() || type.isSignlessInteger()) { + return "%u"; + } + assert(false && "not supported type"); + return ""; + } + + // declare vprintf(i8*, i8*) as external function + static LLVM::LLVMFuncOp + getVprintfDeclaration(ConversionPatternRewriter &rewriter) { + auto moduleOp = + rewriter.getBlock()->getParent()->getParentOfType(); + StringRef funcName("vprintf"); + Operation *funcOp = moduleOp.lookupSymbol(funcName); + if (funcOp) + return cast(*funcOp); + + auto *context = rewriter.getContext(); + + SmallVector argsType{ptr_ty(IntegerType::get(context, 8)), + ptr_ty(IntegerType::get(context, 8))}; + auto funcType = LLVM::LLVMFunctionType::get(i32_ty, argsType); + + ConversionPatternRewriter::InsertionGuard guard(rewriter); + 
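    // Rough illustration (assuming the usual NVPTX printf hook): the function
    // declared here ends up in the module as something like
    //   declare i32 @vprintf(i8* %format, i8* %valist)
    // i.e. device-side printf takes a format string plus a single pointer to a
    // packed argument buffer, rather than a true variadic argument list.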
rewriter.setInsertionPointToStart(moduleOp.getBody()); + + return rewriter.create(UnknownLoc::get(context), funcName, + funcType); + } + + // extend integer to int32, extend float to float64 + // this comes from vprintf alignment requirements. + static std::pair + promoteValue(ConversionPatternRewriter &rewriter, Value value) { + auto *context = rewriter.getContext(); + auto type = value.getType(); + Value newOp = value; + Type newType = type; + + bool bUnsigned = type.isUnsignedInteger(); + if (type.isIntOrIndex() && type.getIntOrFloatBitWidth() < 32) { + if (bUnsigned) { + newType = ui32_ty; + newOp = rewriter.create(UnknownLoc::get(context), newType, + value); + } else { + newType = i32_ty; + newOp = rewriter.create(UnknownLoc::get(context), newType, + value); + } + } else if (type.isBF16() || type.isF16() || type.isF32()) { + newType = f64_ty; + newOp = rewriter.create(UnknownLoc::get(context), newType, + value); + } + + return {newType, newOp}; + } + + static void llPrintf(StringRef msg, ValueRange args, + ConversionPatternRewriter &rewriter) { + static const char formatStringPrefix[] = "printfFormat_"; + assert(!msg.empty() && "printf with empty string not support"); + Type int8Ptr = ptr_ty(i8_ty); + + auto *context = rewriter.getContext(); + auto moduleOp = + rewriter.getBlock()->getParent()->getParentOfType(); + auto funcOp = getVprintfDeclaration(rewriter); + + Value one = rewriter.create( + UnknownLoc::get(context), i32_ty, rewriter.getI32IntegerAttr(1)); + Value zero = rewriter.create( + UnknownLoc::get(context), i32_ty, rewriter.getI32IntegerAttr(0)); + + unsigned stringNumber = 0; + SmallString<16> stringConstName; + do { + stringConstName.clear(); + (formatStringPrefix + Twine(stringNumber++)).toStringRef(stringConstName); + } while (moduleOp.lookupSymbol(stringConstName)); + + llvm::SmallString<64> formatString(msg); + formatString.push_back('\n'); + formatString.push_back('\0'); + size_t formatStringSize = formatString.size_in_bytes(); + auto globalType = LLVM::LLVMArrayType::get(i8_ty, formatStringSize); + + LLVM::GlobalOp global; + { + ConversionPatternRewriter::InsertionGuard guard(rewriter); + rewriter.setInsertionPointToStart(moduleOp.getBody()); + global = rewriter.create( + UnknownLoc::get(context), globalType, + /*isConstant=*/true, LLVM::Linkage::Internal, stringConstName, + rewriter.getStringAttr(formatString)); + } + + Value globalPtr = + rewriter.create(UnknownLoc::get(context), global); + Value stringStart = rewriter.create( + UnknownLoc::get(context), int8Ptr, globalPtr, + SmallVector({zero, zero})); + + Value bufferPtr = + rewriter.create(UnknownLoc::get(context), int8Ptr); + + SmallVector newArgs; + if (args.size() >= 1) { + SmallVector argTypes; + for (auto arg : args) { + Type newType; + Value newArg; + std::tie(newType, newArg) = promoteValue(rewriter, arg); + argTypes.push_back(newType); + newArgs.push_back(newArg); + } + + Type structTy = LLVM::LLVMStructType::getLiteral(context, argTypes); + auto allocated = rewriter.create(UnknownLoc::get(context), + ptr_ty(structTy), one, + /*alignment=*/0); + + for (const auto &entry : llvm::enumerate(newArgs)) { + auto index = rewriter.create( + UnknownLoc::get(context), i32_ty, + rewriter.getI32IntegerAttr(entry.index())); + auto fieldPtr = rewriter.create( + UnknownLoc::get(context), ptr_ty(argTypes[entry.index()]), + allocated, ArrayRef{zero, index}); + rewriter.create(UnknownLoc::get(context), entry.value(), + fieldPtr); + } + bufferPtr = rewriter.create(UnknownLoc::get(context), + int8Ptr, allocated); + } + + 
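    // Worked example (hypothetical operands, for illustration only): for a
    // printf call with an i8 operand and an f32 operand, promoteValue() widens
    // them to a 32-bit integer and f64 respectively, so the alloca above holds
    // a struct laid out like
    //   struct { int32_t arg0; double arg1; };
    // and its address is what gets passed to vprintf as the second argument.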
SmallVector operands{stringStart, bufferPtr}; + rewriter.create(UnknownLoc::get(context), funcOp, operands); + } +}; + +struct MakeRangeOpConversion + : public ConvertTritonGPUOpToLLVMPattern { + + MakeRangeOpConversion(LLVMTypeConverter &converter, PatternBenefit benefit) + : ConvertTritonGPUOpToLLVMPattern(converter, + benefit) {} + + LogicalResult + matchAndRewrite(triton::MakeRangeOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + Location loc = op->getLoc(); + auto rankedTy = op.result().getType().dyn_cast(); + auto shape = rankedTy.getShape(); + auto layout = rankedTy.getEncoding(); + + auto elemTy = rankedTy.getElementType(); + assert(elemTy.isInteger(32)); + Value start = createIndexAttrConstant(rewriter, loc, elemTy, op.start()); + auto idxs = emitIndices(loc, rewriter, layout, shape); + unsigned elems = idxs.size(); + SmallVector retVals(elems); + // TODO: slice layout has more elements than expected. + // Unexpected behavior for make range, but generally OK when followed by + // expand dims + broadcast. very weird behavior otherwise potentially. + for (const auto multiDim : llvm::enumerate(idxs)) { + assert(multiDim.value().size() == 1); + retVals[multiDim.index()] = add(multiDim.value()[0], start); + } + SmallVector types(elems, elemTy); + Type structTy = LLVM::LLVMStructType::getLiteral(getContext(), types); + Value result = getStructFromElements(loc, retVals, rewriter, structTy); + rewriter.replaceOp(op, result); + return success(); + } +}; + +struct GetProgramIdOpConversion + : public ConvertTritonGPUOpToLLVMPattern { + using ConvertTritonGPUOpToLLVMPattern< + triton::GetProgramIdOp>::ConvertTritonGPUOpToLLVMPattern; + + LogicalResult + matchAndRewrite(triton::GetProgramIdOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + Location loc = op->getLoc(); + assert(op.axis() < 3); + + Value blockId = rewriter.create<::mlir::gpu::BlockIdOp>( + loc, rewriter.getIndexType(), dims[op.axis()]); + auto llvmIndexTy = getTypeConverter()->getIndexType(); + rewriter.replaceOpWithNewOp( + op, TypeRange{llvmIndexTy}, ValueRange{blockId}); + return success(); + } + + static constexpr mlir::gpu::Dimension dims[] = {mlir::gpu::Dimension::x, + mlir::gpu::Dimension::y, + mlir::gpu::Dimension::z}; +}; + +struct GetNumProgramsOpConversion + : public ConvertTritonGPUOpToLLVMPattern { + using ConvertTritonGPUOpToLLVMPattern< + triton::GetNumProgramsOp>::ConvertTritonGPUOpToLLVMPattern; + + LogicalResult + matchAndRewrite(triton::GetNumProgramsOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + Location loc = op->getLoc(); + assert(op.axis() < 3); + + Value blockId = rewriter.create<::mlir::gpu::GridDimOp>( + loc, rewriter.getIndexType(), dims[op.axis()]); + auto llvmIndexTy = getTypeConverter()->getIndexType(); + rewriter.replaceOpWithNewOp( + op, TypeRange{llvmIndexTy}, ValueRange{blockId}); + return success(); + } + + static constexpr mlir::gpu::Dimension dims[] = {mlir::gpu::Dimension::x, + mlir::gpu::Dimension::y, + mlir::gpu::Dimension::z}; +}; + +struct AddPtrOpConversion + : public ConvertTritonGPUOpToLLVMPattern { + using ConvertTritonGPUOpToLLVMPattern< + triton::AddPtrOp>::ConvertTritonGPUOpToLLVMPattern; + + LogicalResult + matchAndRewrite(triton::AddPtrOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + Location loc = op->getLoc(); + auto resultTy = op.getType(); + auto resultTensorTy = resultTy.dyn_cast(); + if (resultTensorTy) { + unsigned elems = 
getElemsPerThread(resultTy); + Type elemTy = + getTypeConverter()->convertType(resultTensorTy.getElementType()); + SmallVector types(elems, elemTy); + Type structTy = LLVM::LLVMStructType::getLiteral(getContext(), types); + auto ptrs = getElementsFromStruct(loc, adaptor.ptr(), rewriter); + auto offsets = getElementsFromStruct(loc, adaptor.offset(), rewriter); + SmallVector resultVals(elems); + for (unsigned i = 0; i < elems; ++i) { + resultVals[i] = gep(elemTy, ptrs[i], offsets[i]); + } + Value view = getStructFromElements(loc, resultVals, rewriter, structTy); + rewriter.replaceOp(op, view); + } else { + assert(resultTy.isa()); + Type llResultTy = getTypeConverter()->convertType(resultTy); + Value result = gep(llResultTy, adaptor.ptr(), adaptor.offset()); + rewriter.replaceOp(op, result); + } + return success(); + } +}; + +struct AllocTensorOpConversion + : public ConvertTritonGPUOpToLLVMPattern { + using ConvertTritonGPUOpToLLVMPattern< + triton::gpu::AllocTensorOp>::ConvertTritonGPUOpToLLVMPattern; + + LogicalResult + matchAndRewrite(triton::gpu::AllocTensorOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + Location loc = op->getLoc(); + Value smemBase = getSharedMemoryBase(loc, rewriter, op.getResult()); + auto resultTy = op.getType().dyn_cast(); + auto llvmElemTy = + getTypeConverter()->convertType(resultTy.getElementType()); + auto elemPtrTy = ptr_ty(llvmElemTy, 3); + smemBase = bitcast(smemBase, elemPtrTy); + auto order = resultTy.getEncoding().cast().getOrder(); + // Workaround for 3D tensors + // TODO: we need to modify the pipeline pass to give a proper shared + // encoding to 3D tensors + SmallVector newOrder; + if (resultTy.getShape().size() == 3) + newOrder = {1 + order[0], 1 + order[1], 0}; + else + newOrder = SmallVector(order.begin(), order.end()); + + auto smemObj = SharedMemoryObject(smemBase, resultTy.getShape(), newOrder, + loc, rewriter); + auto retVal = getStructFromSharedMemoryObject(loc, smemObj, rewriter); + rewriter.replaceOp(op, retVal); + return success(); + } +}; + +struct ExtractSliceOpConversion + : public ConvertTritonGPUOpToLLVMPattern { + using ConvertTritonGPUOpToLLVMPattern< + tensor::ExtractSliceOp>::ConvertTritonGPUOpToLLVMPattern; + + LogicalResult + matchAndRewrite(tensor::ExtractSliceOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + // %dst = extract_slice %src[%offsets] + Location loc = op->getLoc(); + auto srcTy = op.source().getType().dyn_cast(); + auto srcLayout = srcTy.getEncoding().dyn_cast(); + assert(srcLayout && "Unexpected resultLayout in ExtractSliceOpConversion"); + assert(op.hasUnitStride() && + "Only unit stride supported by ExtractSliceOpConversion"); + + // newBase = base + offset + // Triton supports either static and dynamic offsets + auto smemObj = + getSharedMemoryObjectFromStruct(loc, adaptor.source(), rewriter); + SmallVector opOffsetVals; + SmallVector offsetVals; + auto mixedOffsets = op.getMixedOffsets(); + for (auto i = 0; i < mixedOffsets.size(); ++i) { + if (op.isDynamicOffset(i)) + opOffsetVals.emplace_back(adaptor.offsets()[i]); + else + opOffsetVals.emplace_back(i32_val(op.getStaticOffset(i))); + offsetVals.emplace_back(add(smemObj.offsets[i], opOffsetVals[i])); + } + // Compute the offset based on the original strides of the shared memory + // object + auto offset = dot(rewriter, loc, opOffsetVals, smemObj.strides); + // newShape = rank_reduce(shape) + // Triton only supports static tensor sizes + SmallVector strideVals; + for (auto i = 0; i < 
op.static_sizes().size(); ++i) { + if (op.getStaticSize(i) == 1) { + offsetVals.erase(offsetVals.begin() + i); + } else { + strideVals.emplace_back(smemObj.strides[i]); + } + } + + auto llvmElemTy = getTypeConverter()->convertType(srcTy.getElementType()); + auto elemPtrTy = ptr_ty(llvmElemTy, 3); + auto resTy = op.getType().dyn_cast(); + smemObj = SharedMemoryObject(gep(elemPtrTy, smemObj.base, offset), + strideVals, offsetVals); + auto retVal = getStructFromSharedMemoryObject(loc, smemObj, rewriter); + rewriter.replaceOp(op, retVal); + return success(); + } +}; + +struct AsyncWaitOpConversion + : public ConvertTritonGPUOpToLLVMPattern { + using ConvertTritonGPUOpToLLVMPattern< + triton::gpu::AsyncWaitOp>::ConvertTritonGPUOpToLLVMPattern; + + LogicalResult + matchAndRewrite(triton::gpu::AsyncWaitOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + PTXBuilder ptxBuilder; + auto &asyncWaitOp = *ptxBuilder.create<>("cp.async.wait_group"); + auto num = op->getAttrOfType("num").getInt(); + asyncWaitOp(ptxBuilder.newConstantOperand(num)); + + auto ctx = op.getContext(); + auto loc = op.getLoc(); + auto voidTy = void_ty(ctx); + ptxBuilder.launch(rewriter, loc, voidTy); + + // Safe to remove the op since it doesn't have any return value. + rewriter.eraseOp(op); + return success(); + } +}; + +void populateTritonGPUToLLVMPatterns(mlir::LLVMTypeConverter &typeConverter, + RewritePatternSet &patterns, int numWarps, + AxisInfoAnalysis &axisInfoAnalysis, + const Allocation *allocation, Value smem, + PatternBenefit benefit) { + patterns.add(typeConverter, benefit); + patterns.add(typeConverter, allocation, smem, + benefit); + patterns.add(typeConverter, benefit); + patterns.add(typeConverter, benefit); + + patterns.add(typeConverter, allocation, smem, + benefit); + patterns.add(typeConverter, benefit); + patterns.add(typeConverter, benefit); + patterns.add(typeConverter, benefit); + patterns.add(typeConverter, benefit); + patterns.add(typeConverter, benefit); +} \ No newline at end of file diff --git a/lib/Conversion/TritonGPUToLLVM/TritonGPUToLLVM.h b/lib/Conversion/TritonGPUToLLVM/TritonGPUToLLVM.h new file mode 100644 index 000000000000..e96330176830 --- /dev/null +++ b/lib/Conversion/TritonGPUToLLVM/TritonGPUToLLVM.h @@ -0,0 +1,15 @@ +#ifndef TRITON_CONVERSION_TRITONGPU_TO_LLVM_H +#define TRITON_CONVERSION_TRITONGPU_TO_LLVM_H + +#include "TritonGPUToLLVMBase.h" + +using namespace mlir; +using namespace mlir::triton; + +void populateTritonGPUToLLVMPatterns(mlir::LLVMTypeConverter &typeConverter, + RewritePatternSet &patterns, int numWarps, + AxisInfoAnalysis &axisInfoAnalysis, + const Allocation *allocation, Value smem, + PatternBenefit benefit); + +#endif diff --git a/lib/Conversion/TritonGPUToLLVM/TritonGPUToLLVMBase.h b/lib/Conversion/TritonGPUToLLVM/TritonGPUToLLVMBase.h new file mode 100644 index 000000000000..6020c9617641 --- /dev/null +++ b/lib/Conversion/TritonGPUToLLVM/TritonGPUToLLVMBase.h @@ -0,0 +1,550 @@ +#ifndef TRITON_CONVERSION_TRITONGPU_TO_LLVM_BASE_H +#define TRITON_CONVERSION_TRITONGPU_TO_LLVM_BASE_H + +// TODO: refactor so that it doesn't fail if Allocation.h +// is included after utility.h (due to conflict in `store` macro +// and +#include "triton/Analysis/Allocation.h" + +// +#include "Utility.h" +#include "mlir/IR/TypeUtilities.h" +#include "triton/Analysis/AxisInfo.h" + +using namespace mlir; +using namespace mlir::triton; + +using ::mlir::LLVM::SharedMemoryObject; +using ::mlir::triton::gpu::BlockedEncodingAttr; +using 
::mlir::triton::gpu::MmaEncodingAttr; +using ::mlir::triton::gpu::SliceEncodingAttr; + +// FuncOpConversion/FuncOpConversionBase is borrowed from +// https://github.com/llvm/llvm-project/blob/fae656b2dd80246c3c6f01e9c77c49560368752c/mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp#L276 +// since it is not exposed on header files in mlir v14 +// TODO(Superjomn): remove the code when MLIR v15.0 is included. +// All the rights are reserved by the LLVM community. + +struct FuncOpConversionBase : public ConvertOpToLLVMPattern { +private: + /// Only retain those attributes that are not constructed by + /// `LLVMFuncOp::build`. If `filterArgAttrs` is set, also filter out argument + /// attributes. + static void filterFuncAttributes(ArrayRef attrs, + bool filterArgAttrs, + SmallVectorImpl &result) { + for (const auto &attr : attrs) { + if (attr.getName() == SymbolTable::getSymbolAttrName() || + attr.getName() == FunctionOpInterface::getTypeAttrName() || + attr.getName() == "std.varargs" || + (filterArgAttrs && + attr.getName() == FunctionOpInterface::getArgDictAttrName())) + continue; + result.push_back(attr); + } + } + + /// Helper function for wrapping all attributes into a single DictionaryAttr + static auto wrapAsStructAttrs(OpBuilder &b, ArrayAttr attrs) { + return DictionaryAttr::get(b.getContext(), + b.getNamedAttr("llvm.struct_attrs", attrs)); + } + +protected: + using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern; + + // Convert input FuncOp to LLVMFuncOp by using the LLVMTypeConverter provided + // to this legalization pattern. + LLVM::LLVMFuncOp + convertFuncOpToLLVMFuncOp(FuncOp funcOp, + ConversionPatternRewriter &rewriter) const { + // Convert the original function arguments. They are converted using the + // LLVMTypeConverter provided to this legalization pattern. + auto varargsAttr = funcOp->getAttrOfType("func.varargs"); + TypeConverter::SignatureConversion result(funcOp.getNumArguments()); + auto llvmType = getTypeConverter()->convertFunctionSignature( + funcOp.getType(), varargsAttr && varargsAttr.getValue(), result); + if (!llvmType) + return nullptr; + + // Propagate argument/result attributes to all converted arguments/result + // obtained after converting a given original argument/result. + SmallVector attributes; + filterFuncAttributes(funcOp->getAttrs(), /*filterArgAttrs=*/true, + attributes); + if (ArrayAttr resAttrDicts = funcOp.getAllResultAttrs()) { + assert(!resAttrDicts.empty() && "expected array to be non-empty"); + auto newResAttrDicts = + (funcOp.getNumResults() == 1) + ? 
resAttrDicts + : rewriter.getArrayAttr( + {wrapAsStructAttrs(rewriter, resAttrDicts)}); + attributes.push_back(rewriter.getNamedAttr( + FunctionOpInterface::getResultDictAttrName(), newResAttrDicts)); + } + if (ArrayAttr argAttrDicts = funcOp.getAllArgAttrs()) { + SmallVector newArgAttrs( + llvmType.cast().getNumParams()); + for (unsigned i = 0, e = funcOp.getNumArguments(); i < e; ++i) { + auto mapping = result.getInputMapping(i); + assert(mapping && "unexpected deletion of function argument"); + for (size_t j = 0; j < mapping->size; ++j) + newArgAttrs[mapping->inputNo + j] = argAttrDicts[i]; + } + attributes.push_back( + rewriter.getNamedAttr(FunctionOpInterface::getArgDictAttrName(), + rewriter.getArrayAttr(newArgAttrs))); + } + for (const auto &pair : llvm::enumerate(attributes)) { + if (pair.value().getName() == "llvm.linkage") { + attributes.erase(attributes.begin() + pair.index()); + break; + } + } + + // Create an LLVM function, use external linkage by default until MLIR + // functions have linkage. + LLVM::Linkage linkage = LLVM::Linkage::External; + if (funcOp->hasAttr("llvm.linkage")) { + auto attr = + funcOp->getAttr("llvm.linkage").dyn_cast(); + if (!attr) { + funcOp->emitError() + << "Contains llvm.linkage attribute not of type LLVM::LinkageAttr"; + return nullptr; + } + linkage = attr.getLinkage(); + } + auto newFuncOp = rewriter.create( + funcOp.getLoc(), funcOp.getName(), llvmType, linkage, + /*dsoLocal*/ false, attributes); + rewriter.inlineRegionBefore(funcOp.getBody(), newFuncOp.getBody(), + newFuncOp.end()); + if (failed(rewriter.convertRegionTypes(&newFuncOp.getBody(), *typeConverter, + &result))) + return nullptr; + + return newFuncOp; + } +}; + +struct ConvertTritonGPUOpToLLVMPatternBase { + static Value + getStructFromSharedMemoryObject(Location loc, + const SharedMemoryObject &smemObj, + ConversionPatternRewriter &rewriter) { + auto elems = smemObj.getElems(); + auto types = smemObj.getTypes(); + auto structTy = + LLVM::LLVMStructType::getLiteral(rewriter.getContext(), types); + return getStructFromElements(loc, elems, rewriter, structTy); + } +}; + +template +class ConvertTritonGPUOpToLLVMPattern + : public ConvertOpToLLVMPattern, + public ConvertTritonGPUOpToLLVMPatternBase { +public: + using OpAdaptor = typename SourceOp::Adaptor; + + explicit ConvertTritonGPUOpToLLVMPattern(LLVMTypeConverter &typeConverter, + PatternBenefit benefit = 1) + : ConvertOpToLLVMPattern(typeConverter, benefit) {} + + explicit ConvertTritonGPUOpToLLVMPattern(LLVMTypeConverter &typeConverter, + const Allocation *allocation, + Value smem, + PatternBenefit benefit = 1) + : ConvertOpToLLVMPattern(typeConverter, benefit), + allocation(allocation), smem(smem) {} + + Value getThreadId(ConversionPatternRewriter &rewriter, Location loc) const { + auto llvmIndexTy = this->getTypeConverter()->getIndexType(); + auto cast = rewriter.create( + loc, TypeRange{llvmIndexTy}, + ValueRange{rewriter.create<::mlir::gpu::ThreadIdOp>( + loc, rewriter.getIndexType(), ::mlir::gpu::Dimension::x)}); + Value threadId = cast.getResult(0); + return threadId; + } + + // ----------------------------------------------------------------------- + // Utilities + // ----------------------------------------------------------------------- + + // Convert an \param index to a multi-dim coordinate given \param shape and + // \param order. 
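  // Worked example (assuming `reorder` permutes an array as a[order[i]], with
  // order[0] the fastest-varying dimension): delinearizing linear = 13 against
  // shape = {4, 8} with order = {0, 1} yields {13 % 4, 13 / 4} = {1, 3}, and
  // linearize({1, 3}, {4, 8}, {0, 1}) maps it back to 3 * 4 + 1 = 13.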
+ SmallVector delinearize(ConversionPatternRewriter &rewriter, + Location loc, Value linear, + ArrayRef shape, + ArrayRef order) const { + unsigned rank = shape.size(); + assert(rank == order.size()); + auto reordered = reorder(shape, order); + auto reorderedMultiDim = delinearize(rewriter, loc, linear, reordered); + SmallVector multiDim(rank); + for (unsigned i = 0; i < rank; ++i) { + multiDim[order[i]] = reorderedMultiDim[i]; + } + return multiDim; + } + + SmallVector delinearize(ConversionPatternRewriter &rewriter, + Location loc, Value linear, + ArrayRef shape) const { + unsigned rank = shape.size(); + assert(rank > 0); + SmallVector multiDim(rank); + if (rank == 1) { + multiDim[0] = linear; + } else { + Value remained = linear; + for (auto &&en : llvm::enumerate(shape.drop_back())) { + Value dimSize = idx_val(en.value()); + multiDim[en.index()] = urem(remained, dimSize); + remained = udiv(remained, dimSize); + } + multiDim[rank - 1] = remained; + } + return multiDim; + } + + Value linearize(ConversionPatternRewriter &rewriter, Location loc, + ArrayRef multiDim, ArrayRef shape, + ArrayRef order) const { + return linearize(rewriter, loc, reorder(multiDim, order), + reorder(shape, order)); + } + + Value linearize(ConversionPatternRewriter &rewriter, Location loc, + ArrayRef multiDim, ArrayRef shape) const { + auto rank = multiDim.size(); + Value linear = idx_val(0); + if (rank > 0) { + linear = multiDim.back(); + for (auto [dim, dimShape] : + llvm::reverse(llvm::zip(multiDim.drop_back(), shape.drop_back()))) { + Value dimSize = idx_val(dimShape); + linear = add(mul(linear, dimSize), dim); + } + } + return linear; + } + + Value dot(ConversionPatternRewriter &rewriter, Location loc, + ArrayRef offsets, ArrayRef strides) const { + assert(offsets.size() == strides.size()); + Value ret = idx_val(0); + for (auto [offset, stride] : llvm::zip(offsets, strides)) { + ret = add(ret, mul(offset, stride)); + } + return ret; + } + + // ----------------------------------------------------------------------- + // Blocked layout indices + // ----------------------------------------------------------------------- + + // Get an index-base for each dimension for a \param blocked_layout. 
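  // For instance (a worked example with no wrap-around): a 1-D tensor of 256
  // elements with sizePerThread = {2}, threadsPerWarp = {32} and
  // warpsPerCTA = {4} gives lane t of warp w the base index 2 * (t + 32 * w);
  // combined with the per-thread offsets, that thread then owns elements
  // base and base + 1.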
+ SmallVector + emitBaseIndexForBlockedLayout(Location loc, + ConversionPatternRewriter &rewriter, + const BlockedEncodingAttr &blocked_layout, + ArrayRef shape) const { + Value threadId = getThreadId(rewriter, loc); + Value warpSize = idx_val(32); + Value laneId = urem(threadId, warpSize); + Value warpId = udiv(threadId, warpSize); + auto sizePerThread = blocked_layout.getSizePerThread(); + auto threadsPerWarp = blocked_layout.getThreadsPerWarp(); + auto warpsPerCTA = blocked_layout.getWarpsPerCTA(); + auto order = blocked_layout.getOrder(); + unsigned rank = shape.size(); + + // delinearize threadId to get the base index + SmallVector multiDimWarpId = + delinearize(rewriter, loc, warpId, warpsPerCTA, order); + SmallVector multiDimThreadId = + delinearize(rewriter, loc, laneId, threadsPerWarp, order); + + SmallVector multiDimBase(rank); + for (unsigned k = 0; k < rank; ++k) { + // Wrap around multiDimWarpId/multiDimThreadId incase + // shape[k] > shapePerCTA[k] + auto maxWarps = + ceil(shape[k], sizePerThread[k] * threadsPerWarp[k]); + auto maxThreads = ceil(shape[k], sizePerThread[k]); + multiDimWarpId[k] = urem(multiDimWarpId[k], idx_val(maxWarps)); + multiDimThreadId[k] = urem(multiDimThreadId[k], idx_val(maxThreads)); + // multiDimBase[k] = (multiDimThreadId[k] + + // multiDimWarpId[k] * threadsPerWarp[k]) * + // sizePerThread[k]; + Value threadsPerWarpK = idx_val(threadsPerWarp[k]); + Value sizePerThreadK = idx_val(sizePerThread[k]); + multiDimBase[k] = + mul(sizePerThreadK, add(multiDimThreadId[k], + mul(multiDimWarpId[k], threadsPerWarpK))); + } + return multiDimBase; + } + + SmallVector> + emitOffsetForBlockedLayout(const BlockedEncodingAttr &blockedLayout, + ArrayRef shape) const { + auto sizePerThread = blockedLayout.getSizePerThread(); + auto threadsPerWarp = blockedLayout.getThreadsPerWarp(); + auto warpsPerCTA = blockedLayout.getWarpsPerCTA(); + auto order = blockedLayout.getOrder(); + + unsigned rank = shape.size(); + SmallVector shapePerCTA = getShapePerCTA(blockedLayout); + SmallVector tilesPerDim(rank); + for (unsigned k = 0; k < rank; ++k) + tilesPerDim[k] = ceil(shape[k], shapePerCTA[k]); + + SmallVector> offset(rank); + for (unsigned k = 0; k < rank; ++k) { + // 1 block in minimum if shape[k] is less than shapePerCTA[k] + for (unsigned blockOffset = 0; blockOffset < tilesPerDim[k]; + ++blockOffset) + for (unsigned warpOffset = 0; warpOffset < warpsPerCTA[k]; ++warpOffset) + for (unsigned threadOffset = 0; threadOffset < threadsPerWarp[k]; + ++threadOffset) + for (unsigned elemOffset = 0; elemOffset < sizePerThread[k]; + ++elemOffset) + offset[k].push_back(blockOffset * sizePerThread[k] * + threadsPerWarp[k] * warpsPerCTA[k] + + warpOffset * sizePerThread[k] * + threadsPerWarp[k] + + threadOffset * sizePerThread[k] + elemOffset); + } + + unsigned elemsPerThread = blockedLayout.getElemsPerThread(shape); + unsigned totalSizePerThread = product(sizePerThread); + SmallVector> reorderedOffset(elemsPerThread); + for (unsigned n = 0; n < elemsPerThread; ++n) { + unsigned linearNanoTileId = n / totalSizePerThread; + unsigned linearNanoTileElemId = n % totalSizePerThread; + SmallVector multiDimNanoTileId = + getMultiDimIndex(linearNanoTileId, tilesPerDim, order); + SmallVector multiDimNanoTileElemId = getMultiDimIndex( + linearNanoTileElemId, sizePerThread, order); + for (unsigned k = 0; k < rank; ++k) { + unsigned reorderedMultiDimId = + multiDimNanoTileId[k] * + (sizePerThread[k] * threadsPerWarp[k] * warpsPerCTA[k]) + + multiDimNanoTileElemId[k]; + 
reorderedOffset[n].push_back(offset[k][reorderedMultiDimId]); + } + } + return reorderedOffset; + } + + // ----------------------------------------------------------------------- + // Mma layout indices + // ----------------------------------------------------------------------- + + SmallVector + emitBaseIndexForMmaLayoutV1(Location loc, ConversionPatternRewriter &rewriter, + const MmaEncodingAttr &mmaLayout, + ArrayRef shape) const { + llvm_unreachable("emitIndicesForMmaLayoutV1 not implemented"); + } + + SmallVector> + emitOffsetForMmaLayoutV1(const MmaEncodingAttr &mmaLayout, + ArrayRef shape) const { + SmallVector> ret; + + for (unsigned i = 0; i < shape[0]; i += getShapePerCTA(mmaLayout)[0]) { + for (unsigned j = 0; j < shape[1]; j += getShapePerCTA(mmaLayout)[1]) { + ret.push_back({i, j}); + ret.push_back({i, j + 1}); + ret.push_back({i + 2, j}); + ret.push_back({i + 2, j + 1}); + ret.push_back({i, j + 8}); + ret.push_back({i, j + 9}); + ret.push_back({i + 2, j + 8}); + ret.push_back({i + 2, j + 9}); + } + } + return ret; + } + + SmallVector + emitBaseIndexForMmaLayoutV2(Location loc, ConversionPatternRewriter &rewriter, + const MmaEncodingAttr &mmaLayout, + ArrayRef shape) const { + auto _warpsPerCTA = mmaLayout.getWarpsPerCTA(); + assert(_warpsPerCTA.size() == 2); + SmallVector warpsPerCTA = {idx_val(_warpsPerCTA[0]), + idx_val(_warpsPerCTA[1])}; + Value threadId = getThreadId(rewriter, loc); + Value warpSize = idx_val(32); + Value laneId = urem(threadId, warpSize); + Value warpId = udiv(threadId, warpSize); + Value warpId0 = urem(warpId, warpsPerCTA[0]); + Value warpId1 = urem(udiv(warpId, warpsPerCTA[0]), warpsPerCTA[1]); + Value offWarp0 = mul(warpId0, idx_val(16)); + Value offWarp1 = mul(warpId1, idx_val(8)); + + SmallVector multiDimBase(2); + multiDimBase[0] = add(udiv(laneId, idx_val(4)), offWarp0); + multiDimBase[1] = add(mul(idx_val(2), urem(laneId, idx_val(4))), offWarp1); + return multiDimBase; + } + + SmallVector> + emitOffsetForMmaLayoutV2(const MmaEncodingAttr &mmaLayout, + ArrayRef shape) const { + SmallVector> ret; + + for (unsigned i = 0; i < shape[0]; i += getShapePerCTA(mmaLayout)[0]) { + for (unsigned j = 0; j < shape[1]; j += getShapePerCTA(mmaLayout)[1]) { + ret.push_back({i, j}); + ret.push_back({i, j + 1}); + ret.push_back({i + 8, j}); + ret.push_back({i + 8, j + 1}); + } + } + return ret; + } + + // ----------------------------------------------------------------------- + // Get offsets / indices for any layout + // ----------------------------------------------------------------------- + + SmallVector emitBaseIndexForLayout(Location loc, + ConversionPatternRewriter &rewriter, + const Attribute &layout, + ArrayRef shape) const { + if (auto blockedLayout = layout.dyn_cast()) + return emitBaseIndexForBlockedLayout(loc, rewriter, blockedLayout, shape); + if (auto mmaLayout = layout.dyn_cast()) { + if (mmaLayout.isVolta()) + return emitBaseIndexForMmaLayoutV1(loc, rewriter, mmaLayout, shape); + if (mmaLayout.isAmpere()) + return emitBaseIndexForMmaLayoutV2(loc, rewriter, mmaLayout, shape); + } + llvm_unreachable("unsupported emitBaseIndexForLayout"); + } + + SmallVector> + emitOffsetForLayout(const Attribute &layout, ArrayRef shape) const { + if (auto blockedLayout = layout.dyn_cast()) + return emitOffsetForBlockedLayout(blockedLayout, shape); + if (auto mmaLayout = layout.dyn_cast()) { + if (mmaLayout.isVolta()) + return emitOffsetForMmaLayoutV1(mmaLayout, shape); + if (mmaLayout.isAmpere()) + return emitOffsetForMmaLayoutV2(mmaLayout, shape); + } + 
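    // As a concrete example of the Ampere (mmaV2) path above, assuming a
    // single warp and a 16x8 tensor so that getShapePerCTA covers the whole
    // tile: every thread receives the four accumulator offsets {0,0}, {0,1},
    // {8,0}, {8,1}; combined with emitBaseIndexForMmaLayoutV2, lane 0 holds
    // elements (0,0), (0,1), (8,0), (8,1) and lane 1 holds (0,2), (0,3),
    // (8,2), (8,3).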
llvm_unreachable("unsupported emitOffsetForLayout"); + } + + // Emit indices calculation within each ConversionPattern, and returns a + // [elemsPerThread X rank] index matrix. + + // TODO: [phil] redundant indices computation do not appear to hurt + // performance much, but they could still significantly slow down + // computations. + SmallVector> emitIndicesForDistributedLayout( + Location loc, ConversionPatternRewriter &rewriter, + const Attribute &layout, ArrayRef shape) const { + + // step 1, delinearize threadId to get the base index + auto multiDimBase = emitBaseIndexForLayout(loc, rewriter, layout, shape); + // step 2, get offset of each element + auto offset = emitOffsetForLayout(layout, shape); + // step 3, add offset to base, and reorder the sequence of indices to + // guarantee that elems in the same sizePerThread are adjacent in order + unsigned rank = shape.size(); + unsigned elemsPerThread = offset.size(); + SmallVector> multiDimIdx(elemsPerThread, + SmallVector(rank)); + for (unsigned n = 0; n < elemsPerThread; ++n) + for (unsigned k = 0; k < rank; ++k) + multiDimIdx[n][k] = add(multiDimBase[k], idx_val(offset[n][k])); + + return multiDimIdx; + } + + struct SmallVectorKeyInfo { + static unsigned getHashValue(const SmallVector &key) { + return llvm::hash_combine_range(key.begin(), key.end()); + } + static bool isEqual(const SmallVector &lhs, + const SmallVector &rhs) { + return lhs == rhs; + } + static SmallVector getEmptyKey() { + return SmallVector(); + } + static SmallVector getTombstoneKey() { + return {std::numeric_limits::max()}; + } + }; + + SmallVector> + emitIndicesForSliceLayout(Location loc, ConversionPatternRewriter &rewriter, + const SliceEncodingAttr &sliceLayout, + ArrayRef shape) const { + auto parent = sliceLayout.getParent(); + unsigned dim = sliceLayout.getDim(); + size_t rank = shape.size(); + auto parentIndices = + emitIndices(loc, rewriter, parent, sliceLayout.paddedShape(shape)); + unsigned numIndices = parentIndices.size(); + SmallVector> resultIndices; + for (unsigned i = 0; i < numIndices; ++i) { + SmallVector indices = parentIndices[i]; + indices.erase(indices.begin() + dim); + resultIndices.push_back(indices); + } + return resultIndices; + } + + // ----------------------------------------------------------------------- + // Emit indices + // ----------------------------------------------------------------------- + SmallVector> emitIndices(Location loc, + ConversionPatternRewriter &b, + const Attribute &layout, + ArrayRef shape) const { + if (auto blocked = layout.dyn_cast()) { + return emitIndicesForDistributedLayout(loc, b, blocked, shape); + } else if (auto mma = layout.dyn_cast()) { + return emitIndicesForDistributedLayout(loc, b, mma, shape); + } else if (auto slice = layout.dyn_cast()) { + return emitIndicesForSliceLayout(loc, b, slice, shape); + } else { + assert(0 && "emitIndices for layouts other than blocked & slice not " + "implemented yet"); + return {}; + } + } + + // ----------------------------------------------------------------------- + // Shared memory utilities + // ----------------------------------------------------------------------- + template + Value getSharedMemoryBase(Location loc, ConversionPatternRewriter &rewriter, + T value) const { + + auto ptrTy = LLVM::LLVMPointerType::get( + this->getTypeConverter()->convertType(rewriter.getI8Type()), 3); + auto bufferId = allocation->getBufferId(value); + assert(bufferId != Allocation::InvalidBufferId && "BufferId not found"); + size_t offset = allocation->getOffset(bufferId); + 
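    // For example, if the Allocation pass assigned this buffer a byte offset
    // of 4096 (an illustrative value only), the base returned below is simply
    // `smem + 4096` as an i8 pointer in shared memory (address space 3);
    // callers then bitcast it to whatever element pointer type they need.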
Value offVal = idx_val(offset); + Value base = gep(ptrTy, smem, offVal); + return base; + } + +protected: + const Allocation *allocation; + Value smem; +}; + +#endif diff --git a/lib/Conversion/TritonGPUToLLVM/TritonGPUToLLVMPass.cpp b/lib/Conversion/TritonGPUToLLVM/TritonGPUToLLVMPass.cpp new file mode 100644 index 000000000000..797cd6f6da25 --- /dev/null +++ b/lib/Conversion/TritonGPUToLLVM/TritonGPUToLLVMPass.cpp @@ -0,0 +1,406 @@ +#include "triton/Conversion/TritonGPUToLLVM/TritonGPUToLLVMPass.h" + +#include "mlir/Conversion/ArithmeticToLLVM/ArithmeticToLLVM.h" +#include "mlir/Conversion/GPUToNVVM/GPUToNVVMPass.h" +#include "mlir/Conversion/MathToLLVM/MathToLLVM.h" +#include "mlir/Conversion/SCFToStandard/SCFToStandard.h" +#include "mlir/Conversion/StandardToLLVM/ConvertStandardToLLVM.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "mlir/Dialect/LLVMIR/NVVMDialect.h" +#include "mlir/Pass/Pass.h" +#include "triton/Analysis/Allocation.h" +#include "triton/Analysis/AxisInfo.h" +#include "triton/Analysis/Membar.h" +#include "triton/Dialect/Triton/IR/Dialect.h" +#include "triton/Dialect/TritonGPU/IR/Dialect.h" + +#include "ConvertLayoutOpToLLVM.h" +#include "DotOpToLLVM.h" +#include "ElementwiseOpToLLVM.h" +#include "LoadStoreOpToLLVM.h" +#include "ReduceOpToLLVM.h" +#include "TritonGPUToLLVM.h" +#include "TypeConverter.h" +#include "ViewOpToLLVM.h" + +using namespace mlir; +using namespace mlir::triton; + +#define GEN_PASS_CLASSES +#include "triton/Conversion/Passes.h.inc" + +namespace mlir { + +class TritonLLVMConversionTarget : public ConversionTarget { +public: + explicit TritonLLVMConversionTarget(MLIRContext &ctx) + : ConversionTarget(ctx) { + addLegalDialect(); + addLegalDialect(); + addIllegalDialect(); + addIllegalDialect(); + addIllegalDialect(); + addIllegalDialect(); + addLegalOp(); + } +}; + +class TritonLLVMFunctionConversionTarget : public ConversionTarget { +public: + explicit TritonLLVMFunctionConversionTarget(MLIRContext &ctx) + : ConversionTarget(ctx) { + addLegalDialect(); + addLegalDialect(); + addIllegalOp(); + addLegalOp(); + } +}; + +} // namespace mlir + +namespace { + +/// FuncOp legalization pattern that converts MemRef arguments to pointers to +/// MemRef descriptors (LLVM struct data types) containing all the MemRef type +/// information. +struct FuncOpConversion : public FuncOpConversionBase { + FuncOpConversion(LLVMTypeConverter &converter, int numWarps, + PatternBenefit benefit) + : FuncOpConversionBase(converter, benefit), numWarps(numWarps) {} + + LogicalResult + matchAndRewrite(FuncOp funcOp, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + auto newFuncOp = convertFuncOpToLLVMFuncOp(funcOp, rewriter); + if (!newFuncOp) + return failure(); + + auto ctx = funcOp->getContext(); + + // Set an attribute to indicate this function is a kernel entry. + newFuncOp->setAttr("nvvm.kernel", + rewriter.getIntegerAttr(type::u1Ty(ctx), 1)); + + // Set an attribute for maxntidx, it could be used in latter LLVM codegen + // for `nvvm.annotation` metadata. 
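    // For example, a kernel compiled with num_warps = 4 gets
    // nvvm.maxntid = 128 (4 warps x 32 threads), bounding the block size the
    // generated PTX may be launched with.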
+ newFuncOp->setAttr("nvvm.maxntid", + rewriter.getIntegerAttr(i32_ty, 32 * numWarps)); + + rewriter.eraseOp(funcOp); + return success(); + } + +private: + int numWarps{0}; +}; + +class ConvertTritonGPUToLLVM + : public ConvertTritonGPUToLLVMBase { + +public: + explicit ConvertTritonGPUToLLVM(int computeCapability) + : computeCapability(computeCapability) {} + + void runOnOperation() override { + MLIRContext *context = &getContext(); + ModuleOp mod = getOperation(); + + mlir::LowerToLLVMOptions option(context); + option.overrideIndexBitwidth(32); + TritonGPUToLLVMTypeConverter typeConverter(context, option); + TritonLLVMFunctionConversionTarget funcTarget(*context); + TritonLLVMConversionTarget target(*context); + + int numWarps = triton::gpu::TritonGPUDialect::getNumWarps(mod); + + // Step 1: Decompose unoptimized layout conversions to use shared memory + // Step 2: Decompose insert_slice_async to use load + insert_slice for + // pre-Ampere architectures or unsupported vectorized load sizes + // Step 3: Allocate shared memories and insert barriers + // Step 4: Convert SCF to CFG + // Step 5: Convert FuncOp to LLVMFuncOp via partial conversion + // Step 6: Get axis and shared memory info + // Step 7: Convert the rest of ops via partial conversion + // + // The reason for putting step 3 before step 4 is that the membar + // analysis currently only supports SCF but not CFG. The reason for a + // separation between 5/7 is that, step 6 is out of the scope of Dialect + // Conversion, thus we need to make sure the smem is not revised during the + // conversion of step 7. + + // Step 1 + decomposeMmaToDotOperand(mod, numWarps); + decomposeBlockedToDotOperand(mod); + + // Step 2 + decomposeInsertSliceAsyncOp(mod); + + // Step 3 + Allocation allocation(mod); + MembarAnalysis membarPass(&allocation); + membarPass.run(); + + // Step 4 + RewritePatternSet scf_patterns(context); + mlir::populateLoopToStdConversionPatterns(scf_patterns); + mlir::ConversionTarget scf_target(*context); + scf_target.addIllegalOp(); + scf_target.markUnknownOpDynamicallyLegal([](Operation *) { return true; }); + if (failed( + applyPartialConversion(mod, scf_target, std::move(scf_patterns)))) + return signalPassFailure(); + + // Step 5 + RewritePatternSet func_patterns(context); + func_patterns.add(typeConverter, numWarps, /*benefit=*/1); + if (failed( + applyPartialConversion(mod, funcTarget, std::move(func_patterns)))) + return signalPassFailure(); + + // Step 6 - get axis and shared memory info + AxisInfoAnalysis axisInfoAnalysis(mod.getContext()); + axisInfoAnalysis.run(mod); + initSharedMemory(allocation.getSharedMemorySize(), typeConverter); + mod->setAttr("triton_gpu.shared", + mlir::IntegerAttr::get(mlir::IntegerType::get(context, 32), + allocation.getSharedMemorySize())); + + // Step 7 - rewrite rest of ops + // We set a higher benefit here to ensure triton's patterns runs before + // arith patterns for some encoding not supported by the community + // patterns. 
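    // Concretely, every populate*Patterns call below registers Triton's
    // patterns with benefit 10, while the arith/math/std/GPU patterns added at
    // the end keep their default benefit, so the Triton-specific lowerings are
    // tried first.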
+ RewritePatternSet patterns(context); + + // Normal conversions + populateTritonGPUToLLVMPatterns(typeConverter, patterns, numWarps, + axisInfoAnalysis, &allocation, smem, + /*benefit=*/10); + // ConvertLayoutOp + populateConvertLayoutOpToLLVMPatterns(typeConverter, patterns, numWarps, + axisInfoAnalysis, &allocation, smem, + /*benefit=*/10); + // DotOp + populateDotOpToLLVMPatterns(typeConverter, patterns, numWarps, + axisInfoAnalysis, &allocation, smem, + /*benefit=*/10); + // ElementwiseOp + populateElementwiseOpToLLVMPatterns(typeConverter, patterns, numWarps, + axisInfoAnalysis, &allocation, smem, + /*benefit=*/10); + // LoadStoreOp + populateLoadStoreOpToLLVMPatterns(typeConverter, patterns, numWarps, + axisInfoAnalysis, &allocation, smem, + /*benefit=*/10); + // ReduceOp + populateReduceOpToLLVMPatterns(typeConverter, patterns, numWarps, + axisInfoAnalysis, &allocation, smem, + /*benefit=*/10); + // ViewOp + populateViewOpToLLVMPatterns(typeConverter, patterns, numWarps, + axisInfoAnalysis, &allocation, smem, + /*benefit=*/10); + + // Add arith/math's patterns to help convert scalar expression to LLVM. + mlir::arith::populateArithmeticToLLVMConversionPatterns(typeConverter, + patterns); + mlir::populateMathToLLVMConversionPatterns(typeConverter, patterns); + mlir::populateStdToLLVMConversionPatterns(typeConverter, patterns); + mlir::populateGpuToNVVMConversionPatterns(typeConverter, patterns); + + if (failed(applyPartialConversion(mod, target, std::move(patterns)))) + return signalPassFailure(); + } + +private: + Value smem; + + int computeCapability{}; + + void initSharedMemory(size_t size, + TritonGPUToLLVMTypeConverter &typeConverter) { + ModuleOp mod = getOperation(); + OpBuilder b(mod.getBodyRegion()); + auto loc = mod.getLoc(); + auto elemTy = typeConverter.convertType(b.getIntegerType(8)); + // Set array size 0 and external linkage indicates that we use dynamic + // shared allocation to allow a larger shared memory size for each kernel. 
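    // Roughly, the module ends up with a declaration equivalent to
    //   llvm.mlir.global external @global_smem() {addr_space = 3} : !llvm.array<0 x i8>
    // and the real size (allocation.getSharedMemorySize(), also recorded in
    // the `triton_gpu.shared` module attribute) is supplied at launch time as
    // dynamic shared memory.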
+ auto arrayTy = LLVM::LLVMArrayType::get(elemTy, 0); + auto global = b.create( + loc, arrayTy, /*isConstant=*/false, LLVM::Linkage::External, + "global_smem", /*value=*/Attribute(), /*alignment=*/0, + mlir::gpu::GPUDialect::getWorkgroupAddressSpace()); + SmallVector funcs; + mod.walk([&](LLVM::LLVMFuncOp func) { funcs.push_back(func); }); + assert(funcs.size() == 1 && + "Inliner pass is expected before TritonGPUToLLVM"); + b.setInsertionPointToStart(&funcs[0].getBody().front()); + smem = b.create(loc, global); + auto ptrTy = + LLVM::LLVMPointerType::get(typeConverter.convertType(b.getI8Type()), 3); + smem = b.create(loc, ptrTy, smem); + } + + void decomposeMmaToDotOperand(ModuleOp mod, int numWarps) const { + // Replace `mma -> dot_op` with `mma -> blocked -> dot_op` + // unless certain conditions are met + mod.walk([&](triton::gpu::ConvertLayoutOp cvtOp) -> void { + OpBuilder builder(cvtOp); + auto srcType = cvtOp.getOperand().getType().cast(); + auto dstType = cvtOp.getType().cast(); + auto srcMma = + srcType.getEncoding().dyn_cast(); + auto dstDotOp = + dstType.getEncoding().dyn_cast(); + if (srcMma && dstDotOp && !isMmaToDotShortcut(srcMma, dstDotOp)) { + auto tmpType = RankedTensorType::get( + dstType.getShape(), dstType.getElementType(), + triton::gpu::BlockedEncodingAttr::get( + mod.getContext(), srcType.getShape(), getSizePerThread(srcMma), + getOrder(srcMma), numWarps)); + auto tmp = builder.create( + cvtOp.getLoc(), tmpType, cvtOp.getOperand()); + auto newConvert = builder.create( + cvtOp.getLoc(), dstType, tmp); + cvtOp.replaceAllUsesWith(newConvert.getResult()); + cvtOp.erase(); + } + }); + } + + void decomposeBlockedToDotOperand(ModuleOp mod) const { + // Replace `blocked -> dot_op` with `blocked -> shared -> dot_op` + // because the codegen doesn't handle `blocked -> dot_op` directly + mod.walk([&](triton::gpu::ConvertLayoutOp cvtOp) -> void { + OpBuilder builder(cvtOp); + auto srcType = cvtOp.getOperand().getType().cast(); + auto dstType = cvtOp.getType().cast(); + auto srcBlocked = + srcType.getEncoding().dyn_cast(); + auto dstDotOp = + dstType.getEncoding().dyn_cast(); + if (srcBlocked && dstDotOp) { + auto tmpType = RankedTensorType::get( + dstType.getShape(), dstType.getElementType(), + triton::gpu::SharedEncodingAttr::get( + mod.getContext(), dstDotOp, srcType.getShape(), + getOrder(srcBlocked), srcType.getElementType())); + auto tmp = builder.create( + cvtOp.getLoc(), tmpType, cvtOp.getOperand()); + auto newConvert = builder.create( + cvtOp.getLoc(), dstType, tmp); + cvtOp.replaceAllUsesWith(newConvert.getResult()); + cvtOp.erase(); + } + }); + } + + void decomposeInsertSliceAsyncOp(ModuleOp mod) const { + AxisInfoAnalysis axisInfoAnalysis(mod.getContext()); + axisInfoAnalysis.run(mod); + // TODO(Keren): This is a hacky knob that may cause performance regression + // when decomposition has been performed. We should remove this knob once we + // have thorough analysis on async wait. Currently, we decompose + // `insert_slice_async` into `load` and `insert_slice` without knowing which + // `async_wait` is responsible for the `insert_slice_async`. To guarantee + // correctness, we blindly set the `async_wait` to wait for all async ops. + // + // There are two options to improve this: + // 1. We can perform a dataflow analysis to find the `async_wait` that is + // responsible for the `insert_slice_async` in the backend. + // 2. We can modify the pipeline to perform the decomposition before the + // `async_wait` is inserted. 
However, it is also risky because we don't know + // the correct vectorized shape yet in the pipeline pass. Making the + // pipeline pass aware of the vectorization could introduce additional + // dependencies on the AxisInfoAnalysis and the Coalesce analysis. + bool decomposed = false; + // insert_slice_async %src, %dst, %idx, %mask, %other + // => + // %tmp = load %src, %mask, %other + // %res = insert_slice %tmp into %dst[%idx] + mod.walk([&](triton::gpu::InsertSliceAsyncOp insertSliceAsyncOp) -> void { + OpBuilder builder(insertSliceAsyncOp); + + // Get the vectorized load size + auto src = insertSliceAsyncOp.src(); + auto dst = insertSliceAsyncOp.dst(); + auto srcTy = src.getType().cast(); + auto dstTy = dst.getType().cast(); + auto srcBlocked = + srcTy.getEncoding().dyn_cast(); + auto resSharedLayout = + dstTy.getEncoding().dyn_cast(); + auto resElemTy = dstTy.getElementType(); + unsigned inVec = axisInfoAnalysis.getPtrVectorSize(src); + unsigned outVec = resSharedLayout.getVec(); + unsigned minVec = std::min(outVec, inVec); + auto maxBitWidth = + std::max(128, resElemTy.getIntOrFloatBitWidth()); + auto vecBitWidth = resElemTy.getIntOrFloatBitWidth() * minVec; + auto bitWidth = std::min(maxBitWidth, vecBitWidth); + auto byteWidth = bitWidth / 8; + + // If the load byte width is not eligible or the current compute + // capability does not support async copy, then we do decompose + if (triton::gpu::InsertSliceAsyncOp::getEligibleLoadByteWidth( + computeCapability) + .contains(byteWidth)) + return; + + // load + auto tmpTy = + RankedTensorType::get(srcTy.getShape(), resElemTy, srcBlocked); + auto loadOp = builder.create( + insertSliceAsyncOp.getLoc(), tmpTy, insertSliceAsyncOp.src(), + insertSliceAsyncOp.mask(), insertSliceAsyncOp.other(), + insertSliceAsyncOp.cache(), insertSliceAsyncOp.evict(), + insertSliceAsyncOp.isVolatile()); + + // insert_slice + auto axis = insertSliceAsyncOp.axis(); + auto intAttr = [&](int64_t v) { return builder.getI64IntegerAttr(v); }; + auto offsets = SmallVector(dstTy.getRank(), intAttr(0)); + auto sizes = SmallVector(dstTy.getRank(), intAttr(1)); + auto strides = SmallVector(dstTy.getRank(), intAttr(1)); + offsets[axis] = insertSliceAsyncOp.index(); + for (size_t i = 0; i < dstTy.getRank(); i++) { + if (i != axis) + sizes[i] = intAttr(dstTy.getShape()[i]); + } + auto insertSliceOp = builder.create( + insertSliceAsyncOp.getLoc(), loadOp, insertSliceAsyncOp.dst(), + offsets, sizes, strides); + + // Replace + insertSliceAsyncOp.replaceAllUsesWith(insertSliceOp.getResult()); + insertSliceAsyncOp.erase(); + decomposed = true; + }); + + mod.walk([&](triton::gpu::AsyncWaitOp asyncWaitOp) -> void { + if (!triton::gpu::AsyncWaitOp::isSupported(computeCapability)) { + // async wait is supported in Ampere and later + asyncWaitOp.erase(); + } else if (decomposed) { + // Wait for all previous async ops + OpBuilder builder(asyncWaitOp); + auto newAsyncWaitOp = + builder.create(asyncWaitOp.getLoc(), 0); + asyncWaitOp.erase(); + } + }); + } +}; + +} // anonymous namespace + +namespace mlir { +namespace triton { + +std::unique_ptr> +createConvertTritonGPUToLLVMPass(int computeCapability) { + return std::make_unique<::ConvertTritonGPUToLLVM>(computeCapability); +} + +} // namespace triton +} // namespace mlir diff --git a/lib/Conversion/TritonGPUToLLVM/TypeConverter.h b/lib/Conversion/TritonGPUToLLVM/TypeConverter.h new file mode 100644 index 000000000000..5259b1ca421f --- /dev/null +++ b/lib/Conversion/TritonGPUToLLVM/TypeConverter.h @@ -0,0 +1,150 @@ +#ifndef 
TRITON_CONVERSION_TRITONGPU_TO_LLVM_TYPECONVERTER_H +#define TRITON_CONVERSION_TRITONGPU_TO_LLVM_TYPECONVERTER_H + +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "triton/Conversion/MLIRTypes.h" + +#include "DotOpHelpers.h" +#include "Utility.h" + +using namespace mlir; +using namespace mlir::triton; + +using ::mlir::LLVM::DotOpFMAConversionHelper; +using ::mlir::LLVM::DotOpMmaV1ConversionHelper; +using ::mlir::LLVM::MMA16816ConversionHelper; +using ::mlir::triton::gpu::BlockedEncodingAttr; +using ::mlir::triton::gpu::DotOperandEncodingAttr; +using ::mlir::triton::gpu::getElemsPerThread; +using ::mlir::triton::gpu::MmaEncodingAttr; +using ::mlir::triton::gpu::SharedEncodingAttr; +using ::mlir::triton::gpu::SliceEncodingAttr; + +class TritonGPUToLLVMTypeConverter : public LLVMTypeConverter { +public: + using TypeConverter::convertType; + + TritonGPUToLLVMTypeConverter(MLIRContext *ctx, LowerToLLVMOptions &option, + const DataLayoutAnalysis *analysis = nullptr) + : LLVMTypeConverter(ctx, option, analysis) { + addConversion([&](triton::PointerType type) -> llvm::Optional { + return convertTritonPointerType(type); + }); + addConversion([&](RankedTensorType type) -> llvm::Optional { + return convertTritonTensorType(type); + }); + // Internally store float8 as int8 + addConversion([&](triton::Float8Type type) -> llvm::Optional { + return IntegerType::get(type.getContext(), 8); + }); + // Internally store bfloat16 as int16 + addConversion([&](BFloat16Type type) -> llvm::Optional { + return IntegerType::get(type.getContext(), 16); + }); + } + + Type convertTritonPointerType(triton::PointerType type) { + // Recursively translate pointee type + return LLVM::LLVMPointerType::get(convertType(type.getPointeeType()), + type.getAddressSpace()); + } + + llvm::Optional convertTritonTensorType(RankedTensorType type) { + auto ctx = type.getContext(); + Attribute layout = type.getEncoding(); + SmallVector shape(type.getShape().begin(), type.getShape().end()); + + if (layout && + (layout.isa() || layout.isa() || + layout.isa())) { + unsigned numElementsPerThread = getElemsPerThread(type); + SmallVector types(numElementsPerThread, + convertType(type.getElementType())); + return LLVM::LLVMStructType::getLiteral(ctx, types); + } else if (auto shared_layout = + layout.dyn_cast_or_null()) { + SmallVector types; + // base ptr + auto ptrType = + LLVM::LLVMPointerType::get(convertType(type.getElementType()), 3); + types.push_back(ptrType); + // shape dims + auto rank = type.getRank(); + // offsets + strides + for (auto i = 0; i < rank * 2; i++) { + types.push_back(IntegerType::get(ctx, 32)); + } + return LLVM::LLVMStructType::getLiteral(ctx, types); + } else if (auto dotOpLayout = + layout.dyn_cast_or_null()) { + if (dotOpLayout.getParent() + .isa()) { // for parent is blocked layout + int numElemsPerThread = + DotOpFMAConversionHelper::getNumElemsPerThread(shape, dotOpLayout); + + return LLVM::LLVMStructType::getLiteral( + ctx, SmallVector(numElemsPerThread, type::f32Ty(ctx))); + } else { // for parent is MMA layout + auto mmaLayout = dotOpLayout.getParent().cast(); + auto wpt = mmaLayout.getWarpsPerCTA(); + Type elemTy = convertType(type.getElementType()); + if (mmaLayout.isAmpere()) { + const llvm::DenseMap targetTyMap = { + {32, elemTy}, + {16, vec_ty(elemTy, 2)}, + {8, vec_ty(elemTy, 4)}, + }; + Type targetTy; + if (targetTyMap.count(elemTy.getIntOrFloatBitWidth())) { + targetTy = targetTyMap.lookup(elemTy.getIntOrFloatBitWidth()); + } else { + assert(false && "Unsupported element type"); + } + if 
(dotOpLayout.getOpIdx() == 0) { // $a + auto elems = + MMA16816ConversionHelper::getANumElemsPerThread(type, wpt[0]); + return LLVM::LLVMStructType::getLiteral( + ctx, SmallVector(elems, targetTy)); + } + if (dotOpLayout.getOpIdx() == 1) { // $b + auto elems = + MMA16816ConversionHelper::getBNumElemsPerThread(type, wpt[1]); + return struct_ty(SmallVector(elems, targetTy)); + } + } + + if (mmaLayout.isVolta()) { + DotOpMmaV1ConversionHelper helper(mmaLayout); + + // TODO[Superjomn]: Both transA and transB are not available here. + bool trans = false; + // TODO[Superjomn]: The order of A and B are not available here. + SmallVector order({1, 0}); + if (trans) { + std::swap(shape[0], shape[1]); + std::swap(order[0], order[1]); + } + + if (dotOpLayout.getOpIdx() == 0) { // $a + int elems = helper.numElemsPerThreadA(shape, order); + Type x2Ty = vec_ty(elemTy, 2); + return struct_ty(SmallVector(elems, x2Ty)); + } + if (dotOpLayout.getOpIdx() == 1) { // $b + int elems = helper.numElemsPerThreadB(shape, order); + Type x2Ty = vec_ty(elemTy, 2); + return struct_ty(SmallVector(elems, x2Ty)); + } + } + } + + llvm::errs() << "Unexpected dot operand layout detected in " + "TritonToLLVMTypeConverter"; + return llvm::None; + } + + return llvm::None; + } +}; + +#endif diff --git a/lib/Conversion/TritonGPUToLLVM/Utility.h b/lib/Conversion/TritonGPUToLLVM/Utility.h new file mode 100644 index 000000000000..4d7c558fc296 --- /dev/null +++ b/lib/Conversion/TritonGPUToLLVM/Utility.h @@ -0,0 +1,369 @@ +#ifndef TRITON_CONVERSION_TRITONGPU_TO_LLVM_UTILITY_H +#define TRITON_CONVERSION_TRITONGPU_TO_LLVM_UTILITY_H + +#include "mlir/Conversion/LLVMCommon/Pattern.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "triton/Analysis/Utility.h" +#include "triton/Conversion/MLIRTypes.h" +#include "triton/Conversion/TritonGPUToLLVM/PTXAsmFormat.h" + +// Shortcuts for some commonly used LLVM ops to keep code simple and intuitive +// Operators +#define inttoptr(...) rewriter.create(loc, __VA_ARGS__) +#define ptrtoint(...) rewriter.create(loc, __VA_ARGS__) +#define zext(...) rewriter.create(loc, __VA_ARGS__) +#define udiv(...) rewriter.create(loc, __VA_ARGS__) +#define urem(...) rewriter.create(loc, __VA_ARGS__) +#define add(...) rewriter.create(loc, __VA_ARGS__) +#define sub(...) rewriter.create(loc, __VA_ARGS__) +#define fadd(...) rewriter.create(loc, __VA_ARGS__) +#define mul(...) rewriter.create(loc, __VA_ARGS__) +#define fmul(...) rewriter.create(loc, __VA_ARGS__) +#define smax(...) rewriter.create(loc, __VA_ARGS__) +#define umax(...) rewriter.create(loc, __VA_ARGS__) +#define fmax(...) rewriter.create(loc, __VA_ARGS__) +#define smin(...) rewriter.create(loc, __VA_ARGS__) +#define umin(...) rewriter.create(loc, __VA_ARGS__) +#define fmin(...) rewriter.create(loc, __VA_ARGS__) +#define and_(...) rewriter.create(loc, __VA_ARGS__) +#define xor_(...) rewriter.create(loc, __VA_ARGS__) +#define bitcast(val__, type__) \ + rewriter.create(loc, type__, val__) +#define gep(...) rewriter.create(loc, __VA_ARGS__) +#define ptr_ty(...) LLVM::LLVMPointerType::get(__VA_ARGS__) +#define insert_val(...) rewriter.create(loc, __VA_ARGS__) +#define extract_val(...) rewriter.create(loc, __VA_ARGS__) +#define insert_element(...) \ + rewriter.create(loc, __VA_ARGS__) +#define extract_element(...) \ + rewriter.create(loc, __VA_ARGS__) +#define load(...) 
rewriter.create(loc, __VA_ARGS__) +#define store(val, ptr) rewriter.create(loc, val, ptr) +#define fcmp_ogt(lhs, rhs) \ + rewriter.create(loc, rewriter.getI1Type(), \ + LLVM::FCmpPredicate::ogt, lhs, rhs) +#define fcmp_olt(lhs, rhs) \ + rewriter.create(loc, rewriter.getI1Type(), \ + LLVM::FCmpPredicate::olt, lhs, rhs) +#define icmp_eq(...) \ + rewriter.create(loc, LLVM::ICmpPredicate::eq, __VA_ARGS__) +#define icmp_ne(...) \ + rewriter.create(loc, LLVM::ICmpPredicate::ne, __VA_ARGS__) +#define icmp_slt(...) \ + rewriter.create(loc, LLVM::ICmpPredicate::slt, __VA_ARGS__) +#define icmp_sle(...) \ + rewriter.create(loc, LLVM::ICmpPredicate::sle, __VA_ARGS__) +#define icmp_sgt(...) \ + rewriter.create(loc, LLVM::ICmpPredicate::sgt, __VA_ARGS__) +#define icmp_sge(...) \ + rewriter.create(loc, LLVM::ICmpPredicate::sge, __VA_ARGS__) +#define icmp_ult(...) \ + rewriter.create(loc, LLVM::ICmpPredicate::ult, __VA_ARGS__) +#define icmp_ule(...) \ + rewriter.create(loc, LLVM::ICmpPredicate::ule, __VA_ARGS__) +#define icmp_ugt(...) \ + rewriter.create(loc, LLVM::ICmpPredicate::ugt, __VA_ARGS__) +#define icmp_uge(...) \ + rewriter.create(loc, LLVM::ICmpPredicate::uge, __VA_ARGS__) +#define select(...) rewriter.create(loc, __VA_ARGS__) +#define address_of(...) rewriter.create(loc, __VA_ARGS__) +#define barrier() rewriter.create(loc) +#define undef(...) rewriter.create(loc, __VA_ARGS__) + +// Types +#define i32_ty rewriter.getIntegerType(32) +#define i16_ty rewriter.getIntegerType(16) +#define ui32_ty rewriter.getIntegerType(32, false) +#define f16_ty rewriter.getF16Type() +#define bf16_ty rewriter.getBF16Type() +#define i8_ty rewriter.getIntegerType(8) +#define f32_ty rewriter.getF32Type() +#define f64_ty rewriter.getF64Type() +#define vec_ty(type, num) VectorType::get(num, type) +#define f32_val(...) LLVM::createConstantF32(loc, rewriter, __VA_ARGS__) +#define f64_val(...) LLVM::createConstantF64(loc, rewriter, __VA_ARGS__) +#define void_ty(ctx) LLVM::LLVMVoidType::get(ctx) +#define struct_ty(...) LLVM::LLVMStructType::getLiteral(ctx, __VA_ARGS__) +#define array_ty(elemTy, count) LLVM::LLVMArrayType::get(elemTy, count) + +// Constants +#define i32_val(...) LLVM::createConstantI32(loc, rewriter, __VA_ARGS__) +#define int_val(width, val) \ + LLVM::createLLVMIntegerConstant(rewriter, loc, width, val) +#define idx_val(...) \ + LLVM::createIndexConstant(rewriter, loc, this->getTypeConverter(), \ + __VA_ARGS__) +#define tid_val() getThreadId(rewriter, loc) + +namespace mlir { +namespace triton { + +// Delinearize supposing order is [0, 1, .. , n] +template +llvm::SmallVector getMultiDimIndexImpl(T linearIndex, + llvm::ArrayRef shape) { + // shape: {a, b, c, d} -> accMul: {1, a, a*b, a*b*c} + size_t rank = shape.size(); + T accMul = product(shape.drop_back()); + T linearRemain = linearIndex; + llvm::SmallVector multiDimIndex(rank); + for (int i = rank - 1; i >= 0; --i) { + multiDimIndex[i] = linearRemain / accMul; + linearRemain = linearRemain % accMul; + if (i != 0) { + accMul = accMul / shape[i - 1]; + } + } + return multiDimIndex; +} + +template +llvm::SmallVector getMultiDimIndex(T linearIndex, llvm::ArrayRef shape, + llvm::ArrayRef order) { + size_t rank = shape.size(); + assert(rank == order.size()); + auto reordered = reorder(shape, order); + auto reorderedMultiDim = getMultiDimIndexImpl(linearIndex, reordered); + llvm::SmallVector multiDim(rank); + for (unsigned i = 0; i < rank; ++i) { + multiDim[order[i]] = reorderedMultiDim[i]; + } + return multiDim; +} + +// Linearize supposing order is [0, 1, .. 
, n] +template +static T getLinearIndexImpl(llvm::ArrayRef multiDimIndex, + llvm::ArrayRef shape) { + assert(multiDimIndex.size() == shape.size()); + // shape: {a, b, c, d} -> accMul: {1, a, a*b, a*b*c} + size_t rank = shape.size(); + T accMul = product(shape.drop_back()); + T linearIndex = 0; + for (int i = rank - 1; i >= 0; --i) { + linearIndex += multiDimIndex[i] * accMul; + if (i != 0) { + accMul = accMul / shape[i - 1]; + } + } + return linearIndex; +} + +template +static T getLinearIndex(llvm::ArrayRef multiDimIndex, + llvm::ArrayRef shape, + llvm::ArrayRef order) { + assert(shape.size() == order.size()); + return getLinearIndexImpl(reorder(multiDimIndex, order), + reorder(shape, order)); +} + +} // namespace triton + +namespace LLVM { +using namespace mlir::triton; + +static Value getStructFromElements(Location loc, ValueRange resultVals, + ConversionPatternRewriter &rewriter, + Type structType) { + if (!structType.isa()) { + return *resultVals.begin(); + } + + Value llvmStruct = rewriter.create(loc, structType); + for (const auto &v : llvm::enumerate(resultVals)) { + assert(v.value() && "can not insert null values"); + llvmStruct = insert_val(structType, llvmStruct, v.value(), + rewriter.getI64ArrayAttr(v.index())); + } + return llvmStruct; +} + +static SmallVector +getElementsFromStruct(Location loc, Value llvmStruct, + ConversionPatternRewriter &rewriter) { + if (llvmStruct.getType().isIntOrIndexOrFloat() || + llvmStruct.getType().isa() || + llvmStruct.getType().isa()) + return {llvmStruct}; + ArrayRef types = + llvmStruct.getType().cast().getBody(); + SmallVector results(types.size()); + for (unsigned i = 0; i < types.size(); ++i) { + Type type = types[i]; + results[i] = extract_val(type, llvmStruct, rewriter.getI64ArrayAttr(i)); + } + return results; +} + +// Create a 32-bit integer constant. +static Value createConstantI32(Location loc, PatternRewriter &rewriter, + int32_t v) { + auto i32ty = rewriter.getIntegerType(32); + return rewriter.create(loc, i32ty, + IntegerAttr::get(i32ty, v)); +} + +static Value createConstantF32(Location loc, PatternRewriter &rewriter, + float v) { + auto type = type::f32Ty(rewriter.getContext()); + return rewriter.create(loc, type, + rewriter.getF32FloatAttr(v)); +} + +static Value createConstantF64(Location loc, PatternRewriter &rewriter, + float v) { + auto type = type::f64Ty(rewriter.getContext()); + return rewriter.create(loc, type, + rewriter.getF64FloatAttr(v)); +} + +// Create an index type constant. +static Value createIndexConstant(OpBuilder &builder, Location loc, + TypeConverter *converter, int64_t value) { + Type ty = converter->convertType(builder.getIndexType()); + return builder.create(loc, ty, + builder.getIntegerAttr(ty, value)); +} + +// Create an integer constant of \param width bits. +static Value createLLVMIntegerConstant(OpBuilder &builder, Location loc, + short width, int64_t value) { + Type ty = builder.getIntegerType(width); + return builder.create(loc, ty, + builder.getIntegerAttr(ty, value)); +} + +/// Helper function to get strides from a given shape and its order +static SmallVector +getStridesFromShapeAndOrder(ArrayRef shape, ArrayRef order, + Location loc, ConversionPatternRewriter &rewriter) { + auto rank = shape.size(); + SmallVector strides(rank); + int64_t stride = 1; + for (auto idx : order) { + strides[idx] = i32_val(stride); + stride *= shape[idx]; + } + return strides; +} + +struct SharedMemoryObject { + Value base; // i32 ptr. The start address of the shared memory object. 
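+  // Illustrative lowering for a rank-2 shared-memory tensor (the element type
+  // and sizes are placeholders, not taken from any particular kernel): the
+  // object is flattened into the struct
+  //   { base : !llvm.ptr<f16, 3>, stride0 : i32, stride1 : i32,
+  //     offset0 : i32, offset1 : i32 }
+  // which matches the element order produced by getElems()/getTypes() below.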
+ // We need to store strides as Values but not integers because the + // extract_slice instruction can take a slice at arbitrary offsets. + // Take $a[16:32, 16:32] as an example, though we know the stride of $a[0] is + // 32, we need to let the instruction that uses $a to be aware of that. + // Otherwise, when we use $a, we only know that the shape of $a is 16x16. If + // we store strides into an attribute array of integers, the information + // cannot pass through block argument assignment because attributes are + // associated with operations but not Values. + // TODO(Keren): We may need to figure out a way to store strides as integers + // if we want to support more optimizations. + SmallVector + strides; // i32 int. The strides of the shared memory object. + SmallVector offsets; // i32 int. The offsets of the shared memory + // objects from the originally allocated object. + + SharedMemoryObject(Value base, ArrayRef strides, + ArrayRef offsets) + : base(base), strides(strides.begin(), strides.end()), + offsets(offsets.begin(), offsets.end()) {} + + SharedMemoryObject(Value base, ArrayRef shape, + ArrayRef order, Location loc, + ConversionPatternRewriter &rewriter) + : base(base) { + strides = getStridesFromShapeAndOrder(shape, order, loc, rewriter); + + for (auto idx : order) { + offsets.emplace_back(i32_val(0)); + } + } + + SmallVector getElems() const { + SmallVector elems; + elems.push_back(base); + elems.append(strides.begin(), strides.end()); + elems.append(offsets.begin(), offsets.end()); + return elems; + } + + SmallVector getTypes() const { + SmallVector types; + types.push_back(base.getType()); + types.append(strides.size(), IntegerType::get(base.getContext(), 32)); + types.append(offsets.size(), IntegerType::get(base.getContext(), 32)); + return types; + } + + Value getCSwizzleOffset(int order) const { + assert(order >= 0 && order < strides.size()); + return offsets[order]; + } + + Value getBaseBeforeSwizzle(int order, Location loc, + ConversionPatternRewriter &rewriter) const { + Value cSwizzleOffset = getCSwizzleOffset(order); + Value offset = sub(i32_val(0), cSwizzleOffset); + Type type = base.getType(); + return gep(type, base, offset); + } +}; + +static SharedMemoryObject +getSharedMemoryObjectFromStruct(Location loc, Value llvmStruct, + ConversionPatternRewriter &rewriter) { + auto elems = getElementsFromStruct(loc, llvmStruct, rewriter); + auto rank = (elems.size() - 1) / 2; + return {/*base=*/elems[0], + /*strides=*/{elems.begin() + 1, elems.begin() + 1 + rank}, + /*offsets=*/{elems.begin() + 1 + rank, elems.end()}}; +} + +static Value storeShared(ConversionPatternRewriter &rewriter, Location loc, + Value ptr, Value val, Value pred) { + MLIRContext *ctx = rewriter.getContext(); + unsigned bits = val.getType().getIntOrFloatBitWidth(); + const char *c = bits == 64 ? "l" : (bits == 16 ? 
"h" : "r"); + + PTXBuilder builder; + auto *ptrOpr = builder.newAddrOperand(ptr, "r"); + auto *valOpr = builder.newOperand(val, c); + auto &st = builder.create<>("st")->shared().b(bits); + st(ptrOpr, valOpr).predicate(pred, "b"); + return builder.launch(rewriter, loc, void_ty(ctx)); +} + +static Value shflSync(Location loc, ConversionPatternRewriter &rewriter, + Value val, int i) { + unsigned bits = val.getType().getIntOrFloatBitWidth(); + + if (bits == 64) { + Type vecTy = vec_ty(f32_ty, 2); + Value vec = bitcast(val, vecTy); + Value val0 = extract_element(f32_ty, vec, i32_val(0)); + Value val1 = extract_element(f32_ty, vec, i32_val(1)); + val0 = shflSync(loc, rewriter, val0, i); + val1 = shflSync(loc, rewriter, val1, i); + vec = undef(vecTy); + vec = insert_element(vecTy, vec, val0, i32_val(0)); + vec = insert_element(vecTy, vec, val1, i32_val(1)); + return bitcast(vec, val.getType()); + } + + PTXBuilder builder; + auto &shfl = builder.create("shfl.sync")->o("bfly").o("b32"); + auto *dOpr = builder.newOperand("=r"); + auto *aOpr = builder.newOperand(val, "r"); + auto *bOpr = builder.newConstantOperand(i); + auto *cOpr = builder.newConstantOperand("0x1f"); + auto *maskOpr = builder.newConstantOperand("0xffffffff"); + shfl(dOpr, aOpr, bOpr, cOpr, maskOpr); + return builder.launch(rewriter, loc, val.getType(), false); +} + +} // namespace LLVM +} // namespace mlir + +#endif diff --git a/lib/Conversion/TritonGPUToLLVM/ViewOpToLLVM.cpp b/lib/Conversion/TritonGPUToLLVM/ViewOpToLLVM.cpp new file mode 100644 index 000000000000..21dccb96323d --- /dev/null +++ b/lib/Conversion/TritonGPUToLLVM/ViewOpToLLVM.cpp @@ -0,0 +1,229 @@ +#include "ViewOpToLLVM.h" +#include "DotOpHelpers.h" + +using namespace mlir; +using namespace mlir::triton; + +using ::mlir::LLVM::DotOpMmaV1ConversionHelper; +using ::mlir::LLVM::DotOpMmaV2ConversionHelper; +using ::mlir::LLVM::getElementsFromStruct; +using ::mlir::LLVM::getSharedMemoryObjectFromStruct; +using ::mlir::LLVM::getStructFromElements; +using ::mlir::triton::gpu::getElemsPerThread; + +struct SplatOpConversion + : public ConvertTritonGPUOpToLLVMPattern { + using ConvertTritonGPUOpToLLVMPattern< + triton::SplatOp>::ConvertTritonGPUOpToLLVMPattern; + + // Convert SplatOp or arith::ConstantOp with SplatElementsAttr to a + // LLVM::StructType value. + // + // @elemType: the element type in operand. + // @resType: the return type of the Splat-like op. + // @constVal: a LLVM::ConstantOp or other scalar value. 
+ static Value convertSplatLikeOp(Type elemType, Type resType, Value constVal, + TypeConverter *typeConverter, + ConversionPatternRewriter &rewriter, + Location loc) { + auto tensorTy = resType.cast(); + if (tensorTy.getEncoding().isa() || + tensorTy.getEncoding().isa()) { + auto srcType = typeConverter->convertType(elemType); + auto llSrc = bitcast(constVal, srcType); + size_t elemsPerThread = getElemsPerThread(tensorTy); + llvm::SmallVector elems(elemsPerThread, llSrc); + llvm::SmallVector elemTypes(elems.size(), srcType); + auto structTy = + LLVM::LLVMStructType::getLiteral(rewriter.getContext(), elemTypes); + + return getStructFromElements(loc, elems, rewriter, structTy); + } else if (auto mmaLayout = + tensorTy.getEncoding().dyn_cast()) { + return convertSplatLikeOpWithMmaLayout( + mmaLayout, resType, elemType, constVal, typeConverter, rewriter, loc); + } else + assert(false && "Unsupported layout found in ConvertSplatLikeOp"); + + return {}; + } + + static Value convertSplatLikeOpWithMmaLayout( + const MmaEncodingAttr &layout, Type resType, Type elemType, + Value constVal, TypeConverter *typeConverter, + ConversionPatternRewriter &rewriter, Location loc) { + auto tensorTy = resType.cast(); + auto shape = tensorTy.getShape(); + if (layout.isAmpere()) { + auto [repM, repN] = DotOpMmaV2ConversionHelper::getRepMN(tensorTy); + size_t fcSize = 4 * repM * repN; + + auto structTy = LLVM::LLVMStructType::getLiteral( + rewriter.getContext(), SmallVector(fcSize, elemType)); + return getStructFromElements(loc, SmallVector(fcSize, constVal), + rewriter, structTy); + } + if (layout.isVolta()) { + DotOpMmaV1ConversionHelper helper(layout); + int repM = helper.getRepM(shape[0]); + int repN = helper.getRepN(shape[1]); + // According to mma layout of v1, each thread process 8 elements. + int elems = 8 * repM * repN; + + auto structTy = LLVM::LLVMStructType::getLiteral( + rewriter.getContext(), SmallVector(elems, elemType)); + return getStructFromElements(loc, SmallVector(elems, constVal), + rewriter, structTy); + } + + assert(false && "Unsupported mma layout found"); + return {}; + } + + LogicalResult matchAndRewrite(triton::SplatOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const { + auto loc = op->getLoc(); + auto src = adaptor.src(); + auto llStruct = convertSplatLikeOp(src.getType(), op.getType(), src, + getTypeConverter(), rewriter, loc); + rewriter.replaceOp(op, {llStruct}); + return success(); + } +}; + +// This pattern helps to convert arith::ConstantOp(with SplatElementsAttr), +// the logic is the same as triton::SplatOp, so the underlying implementation +// is reused. 
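+// Illustrative input (the encoding name is a placeholder):
+//   %cst = arith.constant dense<1.0> : tensor<128x128xf32, #blocked>
+// The splat value is materialized as a scalar LLVM::ConstantOp and then packed
+// per thread by SplatOpConversion::convertSplatLikeOp, exactly as for
+// triton::SplatOp.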
+struct ArithConstantSplatOpConversion + : public ConvertTritonGPUOpToLLVMPattern { + using ConvertTritonGPUOpToLLVMPattern< + arith::ConstantOp>::ConvertTritonGPUOpToLLVMPattern; + + LogicalResult + matchAndRewrite(arith::ConstantOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + auto value = op.getValue(); + if (!value.dyn_cast()) + return failure(); + + auto loc = op->getLoc(); + + LLVM::ConstantOp arithConstantOp; + auto values = op.getValue().dyn_cast(); + auto elemType = values.getElementType(); + + Attribute val; + if (elemType.isBF16() || type::isFloat(elemType)) { + val = values.getValues()[0]; + } else if (type::isInt(elemType)) { + val = values.getValues()[0]; + } else { + llvm::errs() << "ArithConstantSplatOpConversion get unsupported type: " + << value.getType() << "\n"; + return failure(); + } + + auto constOp = rewriter.create(loc, elemType, val); + auto llStruct = SplatOpConversion::convertSplatLikeOp( + elemType, op.getType(), constOp, getTypeConverter(), rewriter, loc); + rewriter.replaceOp(op, llStruct); + + return success(); + } +}; + +struct CatOpConversion : public ConvertTritonGPUOpToLLVMPattern { + using OpAdaptor = typename CatOp::Adaptor; + + explicit CatOpConversion(LLVMTypeConverter &typeConverter, + PatternBenefit benefit = 1) + : ConvertTritonGPUOpToLLVMPattern(typeConverter, benefit) {} + + LogicalResult + matchAndRewrite(CatOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + Location loc = op->getLoc(); + auto resultTy = op.getType().template cast(); + unsigned elems = getElemsPerThread(resultTy); + Type elemTy = + this->getTypeConverter()->convertType(resultTy.getElementType()); + SmallVector types(elems, elemTy); + // unpack input values + auto lhsVals = getElementsFromStruct(loc, adaptor.lhs(), rewriter); + auto rhsVals = getElementsFromStruct(loc, adaptor.rhs(), rewriter); + // concatenate (and potentially reorder) values + SmallVector retVals; + for (Value v : lhsVals) + retVals.push_back(v); + for (Value v : rhsVals) + retVals.push_back(v); + // pack and replace + Type structTy = LLVM::LLVMStructType::getLiteral(this->getContext(), types); + Value ret = getStructFromElements(loc, retVals, rewriter, structTy); + rewriter.replaceOp(op, ret); + return success(); + } +}; + +template +struct ViewLikeOpConversion : public ConvertTritonGPUOpToLLVMPattern { + using OpAdaptor = typename SourceOp::Adaptor; + explicit ViewLikeOpConversion(LLVMTypeConverter &typeConverter, + PatternBenefit benefit = 1) + : ConvertTritonGPUOpToLLVMPattern(typeConverter, benefit) {} + + LogicalResult + matchAndRewrite(SourceOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + // We cannot directly run `rewriter.replaceOp(op, adaptor.src())` + // due to MLIR's restrictions + Location loc = op->getLoc(); + auto resultTy = op.getType().template cast(); + unsigned elems = getElemsPerThread(resultTy); + Type elemTy = + this->getTypeConverter()->convertType(resultTy.getElementType()); + SmallVector types(elems, elemTy); + Type structTy = LLVM::LLVMStructType::getLiteral(this->getContext(), types); + auto vals = getElementsFromStruct(loc, adaptor.src(), rewriter); + Value view = getStructFromElements(loc, vals, rewriter, structTy); + rewriter.replaceOp(op, view); + return success(); + } +}; + +struct TransOpConversion + : public ConvertTritonGPUOpToLLVMPattern { + using ConvertTritonGPUOpToLLVMPattern< + triton::TransOp>::ConvertTritonGPUOpToLLVMPattern; + + LogicalResult + 
matchAndRewrite(triton::TransOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + Location loc = op->getLoc(); + auto srcSmemObj = + getSharedMemoryObjectFromStruct(loc, adaptor.src(), rewriter); + SmallVector dstStrides = {srcSmemObj.strides[1], + srcSmemObj.strides[0]}; + SmallVector dstOffsets = {srcSmemObj.offsets[1], + srcSmemObj.offsets[0]}; + auto dstSmemObj = + SharedMemoryObject(srcSmemObj.base, dstStrides, dstOffsets); + auto retVal = getStructFromSharedMemoryObject(loc, dstSmemObj, rewriter); + rewriter.replaceOp(op, retVal); + return success(); + } +}; + +void populateViewOpToLLVMPatterns(mlir::LLVMTypeConverter &typeConverter, + RewritePatternSet &patterns, int numWarps, + AxisInfoAnalysis &axisInfoAnalysis, + const Allocation *allocation, Value smem, + PatternBenefit benefit) { + patterns.add>(typeConverter, benefit); + patterns.add>(typeConverter, + benefit); + patterns.add(typeConverter, benefit); + patterns.add(typeConverter, benefit); + patterns.add(typeConverter, benefit); + patterns.add(typeConverter, benefit); +} diff --git a/lib/Conversion/TritonGPUToLLVM/ViewOpToLLVM.h b/lib/Conversion/TritonGPUToLLVM/ViewOpToLLVM.h new file mode 100644 index 000000000000..2f0abf5d8d64 --- /dev/null +++ b/lib/Conversion/TritonGPUToLLVM/ViewOpToLLVM.h @@ -0,0 +1,15 @@ +#ifndef TRITON_CONVERSION_TRITONGPU_TO_LLVM_VIEW_OP_H +#define TRITON_CONVERSION_TRITONGPU_TO_LLVM_VIEW_OP_H + +#include "TritonGPUToLLVMBase.h" + +using namespace mlir; +using namespace mlir::triton; + +void populateViewOpToLLVMPatterns(mlir::LLVMTypeConverter &typeConverter, + RewritePatternSet &patterns, int numWarps, + AxisInfoAnalysis &axisInfoAnalysis, + const Allocation *allocation, Value smem, + PatternBenefit benefit); + +#endif diff --git a/lib/Conversion/TritonToTritonGPU/CMakeLists.txt b/lib/Conversion/TritonToTritonGPU/CMakeLists.txt new file mode 100644 index 000000000000..87803c2e2085 --- /dev/null +++ b/lib/Conversion/TritonToTritonGPU/CMakeLists.txt @@ -0,0 +1,19 @@ +add_mlir_conversion_library(TritonToTritonGPU + TritonToTritonGPUPass.cpp + + ADDITIONAL_HEADER_DIRS + ${PROJECT_SOURCE_DIR}/include/triton/Conversion/TritonToTritonGPU + + DEPENDS + TritonConversionPassIncGen + + LINK_COMPONENTS + Core + + LINK_LIBS PUBLIC + MLIRIR + MLIRPass + TritonIR + TritonGPUIR + TritonGPUTransforms +) diff --git a/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp b/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp new file mode 100644 index 000000000000..ce5698289549 --- /dev/null +++ b/lib/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.cpp @@ -0,0 +1,647 @@ +#include "triton/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.h" + +#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h" +#include "mlir/Dialect/GPU/GPUDialect.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "mlir/Dialect/LLVMIR/NVVMDialect.h" +#include "mlir/Dialect/StandardOps/IR/Ops.h" +#include "mlir/Pass/Pass.h" +#include "mlir/Transforms/DialectConversion.h" +#include "triton/Dialect/Triton/IR/Dialect.h" +#include "triton/Dialect/TritonGPU/IR/Dialect.h" +#include "triton/Dialect/TritonGPU/Transforms/TritonGPUConversion.h" +#include "llvm/ADT/APSInt.h" +#include + +using namespace mlir; +using namespace mlir::triton; + +#define GEN_PASS_CLASSES +#include "triton/Conversion/Passes.h.inc" + +namespace { + +template class GenericOpPattern : public OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(Op op, typename Op::Adaptor 
adaptor, + ConversionPatternRewriter &rewriter) const override { + Type retType = this->getTypeConverter()->convertType(op.getType()); + rewriter.replaceOpWithNewOp(op, retType, adaptor.getOperands()); + return success(); + } +}; + +template +class ArithCmpPattern : public OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(SrcOp op, typename SrcOp::Adaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + Type retType = this->getTypeConverter()->convertType(op.getType()); + rewriter.replaceOpWithNewOp(op, retType, adaptor.getPredicate(), + adaptor.getLhs(), adaptor.getRhs()); + return success(); + } +}; + +class ArithConstantPattern : public OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(arith::ConstantOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + Type retType = getTypeConverter()->convertType(op.getType()); + auto value = adaptor.getValue().dyn_cast(); + assert(value); + rewriter.replaceOpWithNewOp( + op, retType, + value.reshape(retType) // This is a hack. We just want to add encoding + ); + return success(); + } +}; + +class ConvertArithmeticOp : public ConversionPattern { +public: + ConvertArithmeticOp(TritonGPUTypeConverter &typeConverter, + MLIRContext *context) + : ConversionPattern(typeConverter, MatchAnyOpTypeTag(), /*benefit=*/1, + context) {} + + LogicalResult + matchAndRewrite(Operation *op, ArrayRef operands, + ConversionPatternRewriter &rewriter) const override { + Dialect *dialect = op->getDialect(); + if (dialect->getTypeID() != mlir::TypeID::get()) + return failure(); + return success(); + } +}; + +void populateArithmeticPatternsAndLegality( + TritonGPUTypeConverter &typeConverter, RewritePatternSet &patterns, + TritonGPUConversionTarget &target) { + // -------------- + // Add legality and rewrite pattern rules for operations + // from the Arithmetic dialect. 
The basic premise is that + // arithmetic operations require both inputs to have the same + // non-null encoding + // -------------- + MLIRContext *context = patterns.getContext(); + // TODO: there's probably a better way to avoid adding all ops one-by-one + patterns.add< + ArithConstantPattern, GenericOpPattern, + GenericOpPattern, GenericOpPattern, + GenericOpPattern, GenericOpPattern, + GenericOpPattern, + GenericOpPattern, + GenericOpPattern, GenericOpPattern, + GenericOpPattern, GenericOpPattern, + GenericOpPattern, GenericOpPattern, + GenericOpPattern, GenericOpPattern, + GenericOpPattern, // NegFOp + // Floating point + GenericOpPattern, GenericOpPattern, + // MaxMin + GenericOpPattern, GenericOpPattern, + GenericOpPattern, GenericOpPattern, + GenericOpPattern, GenericOpPattern, + // Floating point + GenericOpPattern, GenericOpPattern, + GenericOpPattern, + // Cmp + ArithCmpPattern, + ArithCmpPattern, + // Cast Ops + GenericOpPattern, GenericOpPattern, + GenericOpPattern, GenericOpPattern, + GenericOpPattern, GenericOpPattern, + GenericOpPattern, GenericOpPattern, + GenericOpPattern>(typeConverter, context); +} + +// this shouldn't exist if mlir's SelectOp checked encodings properly +class StdSelectPattern : public OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(SelectOp op, typename SelectOp::Adaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + Type retType = this->getTypeConverter()->convertType(op.getType()); + rewriter.replaceOpWithNewOp( + op, retType, adaptor.getCondition(), adaptor.getTrueValue(), + adaptor.getFalseValue()); + return success(); + } +}; + +void populateStdPatternsAndLegality(TritonGPUTypeConverter &typeConverter, + RewritePatternSet &patterns, + TritonGPUConversionTarget &target) { + MLIRContext *context = patterns.getContext(); + // Rewrite rule + patterns.add(typeConverter, context); + target.addLegalOp(); // this is ok because all functions are inlined + // by the frontend +} + +void populateMathPatternsAndLegality(TritonGPUTypeConverter &typeConverter, + RewritePatternSet &patterns, + TritonGPUConversionTarget &target) { + MLIRContext *context = patterns.getContext(); + // Rewrite rule + patterns.add, GenericOpPattern, + GenericOpPattern, GenericOpPattern, + GenericOpPattern>(typeConverter, context); +} + +// +// Triton patterns +// +// TODO: Do we need to put them in anonymous namespace? 
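+// Each pattern below re-creates its op with a result type produced by the
+// TritonGPUTypeConverter, so tensor results pick up a layout encoding. As a
+// rough illustration (the encoding name is a placeholder), make_range goes from
+//   %0 = tt.make_range {end = 128 : i32, start = 0 : i32} : tensor<128xi32>
+// to
+//   %0 = tt.make_range {end = 128 : i32, start = 0 : i32} : tensor<128xi32, #blocked>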
+struct TritonMakeRangePattern + : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(triton::MakeRangeOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + Type retType = getTypeConverter()->convertType(op.getType()); + rewriter.replaceOpWithNewOp( + op, retType, adaptor.start(), adaptor.end()); + return success(); + } +}; + +struct TritonExpandDimsPattern + : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(triton::ExpandDimsOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + // Type retType = op.getType()); + RankedTensorType argType = adaptor.src().getType().cast(); + Attribute _argEncoding = argType.getEncoding(); + if (!_argEncoding) + return failure(); + auto argEncoding = _argEncoding.cast(); + // return shape + auto retShape = argType.getShape().vec(); + retShape.insert(retShape.begin() + op.axis(), 1); + // return encoding + auto retSizePerThread = argEncoding.getSizePerThread().vec(); + retSizePerThread.insert(retSizePerThread.begin() + op.axis(), 1); + auto retThreadsPerWarp = argEncoding.getThreadsPerWarp().vec(); + retThreadsPerWarp.insert(retThreadsPerWarp.begin() + op.axis(), 1); + auto retWarpsPerCTA = argEncoding.getWarpsPerCTA().vec(); + retWarpsPerCTA.insert(retWarpsPerCTA.begin() + op.axis(), 1); + SmallVector retOrder(retShape.size()); + std::iota(retOrder.begin(), retOrder.end(), 0); + triton::gpu::BlockedEncodingAttr retEncoding = + triton::gpu::BlockedEncodingAttr::get(getContext(), retSizePerThread, + retThreadsPerWarp, retWarpsPerCTA, + retOrder); + // convert operand to slice of return type + Attribute newArgEncoding = triton::gpu::SliceEncodingAttr::get( + getContext(), op.axis(), retEncoding); + RankedTensorType newArgType = RankedTensorType::get( + argType.getShape(), argType.getElementType(), newArgEncoding); + // construct new op + auto newSrc = rewriter.create( + op.getLoc(), newArgType, adaptor.src()); + rewriter.replaceOpWithNewOp(op, newSrc, + adaptor.axis()); + return success(); + } +}; + +struct TritonDotPattern : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(triton::DotOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + RankedTensorType origType = op.getType().cast(); + auto origShape = origType.getShape(); + auto typeConverter = getTypeConverter(); + int numWarps = typeConverter->getNumWarps(); + + SmallVector retSizePerThread = {1, 1}; + if (origShape[0] * origShape[1] / (numWarps * 32) >= 4) + retSizePerThread = {2, 2}; + if (origShape[0] * origShape[1] / (numWarps * 32) >= 16) + retSizePerThread = {4, 4}; + SmallVector retOrder = {1, 0}; + Attribute dEncoding = triton::gpu::BlockedEncodingAttr::get( + getContext(), origShape, retSizePerThread, retOrder, numWarps); + RankedTensorType retType = + RankedTensorType::get(origShape, origType.getElementType(), dEncoding); + // a & b must be of smem layout + auto aType = adaptor.a().getType().cast(); + auto bType = adaptor.b().getType().cast(); + Attribute aEncoding = aType.getEncoding(); + Attribute bEncoding = bType.getEncoding(); + if (!aEncoding || !bEncoding) + return failure(); + Value a = adaptor.a(); + Value b = adaptor.b(); + Value c = adaptor.c(); + if (!aEncoding.isa()) { + Attribute encoding = + triton::gpu::DotOperandEncodingAttr::get(getContext(), 0, dEncoding); + auto dstType = 
RankedTensorType::get(aType.getShape(), + aType.getElementType(), encoding); + a = rewriter.create(a.getLoc(), dstType, a); + } + if (!bEncoding.isa()) { + Attribute encoding = + triton::gpu::DotOperandEncodingAttr::get(getContext(), 1, dEncoding); + auto dstType = RankedTensorType::get(bType.getShape(), + bType.getElementType(), encoding); + b = rewriter.create(b.getLoc(), dstType, b); + } + c = rewriter.create(c.getLoc(), retType, c); + + rewriter.replaceOpWithNewOp(op, retType, a, b, c, + adaptor.allowTF32()); + return success(); + } +}; + +struct TritonCatPattern : public OpConversionPattern { + + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(triton::CatOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + // For now, this behaves like generic, but this will evolve when + // we add support for `can_reorder=False` + Type retType = this->getTypeConverter()->convertType(op.getType()); + rewriter.replaceOpWithNewOp(op, retType, + adaptor.getOperands()); + return success(); + } +}; + +struct TritonTransPattern : public OpConversionPattern { + + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(triton::TransOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + Value src = adaptor.src(); + auto srcType = src.getType().cast(); + Attribute srcEncoding = srcType.getEncoding(); + if (!srcEncoding) + return failure(); + if (!srcEncoding.isa()) { + // TODO: end-to-end correctness is broken if + // the input is blocked and the output is shared + // with different order. Maybe a backend issue in BlockedToShared? + SmallVector order = {1, 0}; + if (auto srcBlockedEncoding = + srcEncoding.dyn_cast()) + llvm::copy(srcBlockedEncoding.getOrder(), order.begin()); + srcEncoding = + triton::gpu::SharedEncodingAttr::get(getContext(), 1, 1, 1, order); + srcType = RankedTensorType::get(srcType.getShape(), + srcType.getElementType(), srcEncoding); + src = rewriter.create(src.getLoc(), srcType, + src); + } + auto srcSharedEncoding = + srcEncoding.cast(); + SmallVector retOrder(srcSharedEncoding.getOrder().begin(), + srcSharedEncoding.getOrder().end()); + SmallVector retShapes(srcType.getShape().begin(), + srcType.getShape().end()); + std::reverse(retOrder.begin(), retOrder.end()); + std::reverse(retShapes.begin(), retShapes.end()); + auto retEncoding = + triton::gpu::SharedEncodingAttr::get(getContext(), 1, 1, 1, retOrder); + auto retType = + RankedTensorType::get(retShapes, srcType.getElementType(), retEncoding); + + rewriter.replaceOpWithNewOp(op, retType, src); + return success(); + } +}; + +struct TritonLoadPattern : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(triton::LoadOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, typeConverter->convertType(op.getType()), adaptor.ptr(), + adaptor.mask(), adaptor.other(), adaptor.cache(), adaptor.evict(), + adaptor.isVolatile()); + return success(); + } +}; + +struct TritonStorePattern : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(triton::StoreOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, adaptor.ptr(), adaptor.value(), adaptor.mask()); + return success(); + } +}; + +struct TritonAtomicCASPattern + : public OpConversionPattern { + using 
OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(triton::AtomicCASOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, typeConverter->convertType(op.getType()), adaptor.ptr(), + adaptor.cmp(), adaptor.val()); + return success(); + } +}; + +struct TritonAtomicRMWPattern + : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(triton::AtomicRMWOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, typeConverter->convertType(op.getType()), adaptor.atomic_rmw_op(), + adaptor.ptr(), adaptor.val(), adaptor.mask()); + return success(); + } +}; + +struct TritonExtElemwisePattern + : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(triton::ExtElemwiseOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, typeConverter->convertType(op.getType()), adaptor.args(), + adaptor.libname(), adaptor.libpath(), adaptor.symbol()); + return success(); + } +}; + +template +struct TritonGenericPattern : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(Op op, typename Op::Adaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + Type retType = this->getTypeConverter()->convertType(op.getType()); + rewriter.replaceOpWithNewOp(op, retType, adaptor.getOperands()); + return success(); + } +}; + +struct TritonBroadcastPattern + : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + // This creates a tensor with the new shape but the argument's layout + LogicalResult + matchAndRewrite(BroadcastOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + auto srcType = adaptor.src().getType().cast(); + auto srcEncoding = srcType.getEncoding(); + if (!srcEncoding) + return failure(); + auto opType = op.getType().cast(); + Type retType = RankedTensorType::get(opType.getShape(), + opType.getElementType(), srcEncoding); + // Type retType = this->getTypeConverter()->convertType(op.getType()); + rewriter.replaceOpWithNewOp(op, retType, + adaptor.getOperands()); + return success(); + } +}; + +struct TritonReducePattern : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(triton::ReduceOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp( + op, adaptor.redOp(), adaptor.operand(), adaptor.axis()); + return success(); + } +}; + +struct TritonPrintfPattern : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(PrintfOp op, typename PrintfOp::Adaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + rewriter.replaceOpWithNewOp(op, op.prefixAttr(), + adaptor.getOperands()); + return success(); + } +}; + +void populateTritonPatterns(TritonGPUTypeConverter &typeConverter, + RewritePatternSet &patterns) { + MLIRContext *context = patterns.getContext(); + patterns.add< // TODO: view should have custom pattern that views the layout + TritonGenericPattern, + TritonGenericPattern, + TritonGenericPattern, + TritonGenericPattern, + TritonGenericPattern, + TritonGenericPattern, TritonBroadcastPattern, + TritonGenericPattern, TritonCatPattern, + TritonReducePattern, 
TritonTransPattern, TritonExpandDimsPattern, + TritonMakeRangePattern, TritonDotPattern, TritonLoadPattern, + TritonStorePattern, TritonExtElemwisePattern, TritonPrintfPattern, + TritonAtomicRMWPattern>(typeConverter, context); +} + +// +// SCF patterns +// +// This is borrowed from ConvertForOpTypes in +// SCF/Transforms/StructuralTypeConversions.cpp +struct SCFForPattern : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + // Ref: ConvertForOpTypes + LogicalResult + matchAndRewrite(scf::ForOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + auto newOp = + cast(rewriter.cloneWithoutRegions(*op.getOperation())); + rewriter.inlineRegionBefore(op.getLoopBody(), newOp.getLoopBody(), + newOp.getLoopBody().end()); + + // Now, update all the types. + + // Convert the types of block arguments within the given region. This + // replaces each block with a new block containing the updated signature. + // The entry block may have a special conversion if `entryConversion` is + // provided. On success, the new entry block to the region is returned for + // convenience. Otherwise, failure is returned. + if (failed(rewriter.convertRegionTypes(&newOp.getLoopBody(), + *getTypeConverter()))) { + return rewriter.notifyMatchFailure(op, "could not convert body types"); + } + // Change the clone to use the updated operands. We could have cloned with + // a BlockAndValueMapping, but this seems a bit more direct. + newOp->setOperands(adaptor.getOperands()); + // Update the result types to the new converted types. + SmallVector newResultTypes; + for (Type type : op.getResultTypes()) { + Type newType = typeConverter->convertType(type); + if (!newType) + return rewriter.notifyMatchFailure(op, "not a 1:1 type conversion"); + newResultTypes.push_back(newType); + } + for (auto t : llvm::zip(newOp.getResults(), newResultTypes)) + std::get<0>(t).setType(std::get<1>(t)); + + rewriter.replaceOp(op, newOp.getResults()); + + return success(); + } +}; + +struct SCFYieldPattern : public OpConversionPattern { + using OpConversionPattern::OpConversionPattern; + + LogicalResult + matchAndRewrite(scf::YieldOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + // rewriter.setInsertionPointToEnd(rewriter.getInsertionBlock()); + // rewriter.create(op.getLoc(), adaptor.getOperands()); + // op.erase(); + rewriter.replaceOpWithNewOp(op, adaptor.getOperands()); + return success(); + } +}; + +// This is borrowed from ConvertFIfOpTypes in +// SCF/Transforms/StructuralTypeConversions.cpp +class SCFIfPattern : public OpConversionPattern { +public: + using OpConversionPattern::OpConversionPattern; + LogicalResult + matchAndRewrite(scf::IfOp op, OpAdaptor adaptor, + ConversionPatternRewriter &rewriter) const override { + // TODO: Generalize this to any type conversion, not just 1:1. + // + // We need to implement something more sophisticated here that tracks which + // types convert to which other types and does the appropriate + // materialization logic. + // For example, it's possible that one result type converts to 0 types and + // another to 2 types, so newResultTypes would at least be the right size to + // not crash in the llvm::zip call below, but then we would set the the + // wrong type on the SSA values! These edge cases are also why we cannot + // safely use the TypeConverter::convertTypes helper here. 
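+    //
+    // In this pass the conversion is expected to stay 1:1: e.g. an scf.if
+    // result of type tensor<128x128xf32> simply becomes
+    // tensor<128x128xf32, #blocked> (encoding name illustrative), so converting
+    // each result type individually below is sufficient.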
+ SmallVector newResultTypes; + for (auto type : op.getResultTypes()) { + Type newType = typeConverter->convertType(type); + if (!newType) + return rewriter.notifyMatchFailure(op, "not a 1:1 type conversion"); + newResultTypes.push_back(newType); + } + + // See comments in the ForOp pattern for why we clone without regions and + // then inline. + scf::IfOp newOp = + cast(rewriter.cloneWithoutRegions(*op.getOperation())); + rewriter.inlineRegionBefore(op.getThenRegion(), newOp.getThenRegion(), + newOp.getThenRegion().end()); + rewriter.inlineRegionBefore(op.getElseRegion(), newOp.getElseRegion(), + newOp.getElseRegion().end()); + + // Update the operands and types. + newOp->setOperands(adaptor.getOperands()); + for (auto t : llvm::zip(newOp.getResults(), newResultTypes)) + std::get<0>(t).setType(std::get<1>(t)); + rewriter.replaceOp(op, newOp.getResults()); + return success(); + } +}; + +void populateSCFPatterns(TritonGPUTypeConverter &typeConverter, + RewritePatternSet &patterns) { + MLIRContext *context = patterns.getContext(); + patterns.add(typeConverter, + context); +} + +class ConvertTritonToTritonGPU + : public ConvertTritonToTritonGPUBase { +public: + ConvertTritonToTritonGPU() = default; + // constructor with some parameters set explicitly. + ConvertTritonToTritonGPU(int numWarps) { this->numWarps = numWarps; } + + void runOnOperation() override { + MLIRContext *context = &getContext(); + ModuleOp mod = getOperation(); + // type converter + TritonGPUTypeConverter typeConverter(context, numWarps); + TritonGPUConversionTarget target(*context, typeConverter); + // rewrite patterns + RewritePatternSet patterns(context); + // add rules + populateStdPatternsAndLegality(typeConverter, patterns, target); + populateArithmeticPatternsAndLegality(typeConverter, patterns, target); + populateMathPatternsAndLegality(typeConverter, patterns, target); + populateTritonPatterns(typeConverter, patterns); + // TODO: can we use + // mlir::scf::populateSCFStructurealTypeConversionsAndLegality(...) here? 
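+    // For now populateSCFPatterns (defined above) registers the hand-written
+    // structural patterns for the SCF ops used by Triton kernels; they only
+    // need the 1:1 tensor-to-encoded-tensor conversion used throughout this
+    // pass.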
+ populateSCFPatterns(typeConverter, patterns); + + if (failed(applyPartialConversion(mod, target, std::move(patterns)))) + return signalPassFailure(); + + auto inti = llvm::APSInt(32, false); + auto i32_ty = IntegerType::get(mod->getContext(), 32); + + mod->setAttr( + AttrNumWarpsName, + IntegerAttr::get(i32_ty, llvm::APInt(32, numWarps.getValue()))); + + // update layouts + // broadcast src => multicast, dst => broadcasted + // if (failed(target.refineLayouts(mod, numWarps))) + // return signalPassFailure(); + } +}; + +} // namespace + +std::unique_ptr> +mlir::triton::createConvertTritonToTritonGPUPass(int numWarps) { + return std::make_unique<::ConvertTritonToTritonGPU>(numWarps); +} + +std::unique_ptr> +mlir::triton::createConvertTritonToTritonGPUPass() { + return std::make_unique<::ConvertTritonToTritonGPU>(); +} diff --git a/lib/Dialect/CMakeLists.txt b/lib/Dialect/CMakeLists.txt new file mode 100644 index 000000000000..27cb65ce5101 --- /dev/null +++ b/lib/Dialect/CMakeLists.txt @@ -0,0 +1,2 @@ +add_subdirectory(Triton) +add_subdirectory(TritonGPU) diff --git a/lib/Dialect/Triton/CMakeLists.txt b/lib/Dialect/Triton/CMakeLists.txt new file mode 100644 index 000000000000..9f57627c321f --- /dev/null +++ b/lib/Dialect/Triton/CMakeLists.txt @@ -0,0 +1,2 @@ +add_subdirectory(IR) +add_subdirectory(Transforms) diff --git a/lib/Dialect/Triton/IR/CMakeLists.txt b/lib/Dialect/Triton/IR/CMakeLists.txt new file mode 100644 index 000000000000..2d679b21fde8 --- /dev/null +++ b/lib/Dialect/Triton/IR/CMakeLists.txt @@ -0,0 +1,20 @@ +add_mlir_dialect_library(TritonIR + Interfaces.cpp + Dialect.cpp + Ops.cpp + Types.cpp + Traits.cpp + + DEPENDS + TritonTableGen + + LINK_LIBS PUBLIC + MLIRIR + MLIRArithmetic + MLIRSCF + + # Since LLVM 15 + # MLIRFunc + # else + MLIRStandard +) diff --git a/lib/Dialect/Triton/IR/Dialect.cpp b/lib/Dialect/Triton/IR/Dialect.cpp new file mode 100644 index 000000000000..437de26031e2 --- /dev/null +++ b/lib/Dialect/Triton/IR/Dialect.cpp @@ -0,0 +1,51 @@ +#include "triton/Dialect/Triton/IR/Dialect.h" +#include "triton/Dialect/Triton/IR/Types.h" + +#include "triton/Dialect/Triton/IR/AttrInterfaces.h.inc" +#include "llvm/ADT/StringSwitch.h" +#include "llvm/ADT/TypeSwitch.h" +#include "llvm/Support/raw_ostream.h" + +#include "mlir/IR/DialectImplementation.h" + +#include "mlir/Transforms/InliningUtils.h" +#include "triton/Dialect/Triton/IR/Dialect.cpp.inc" + +using namespace mlir; +using namespace mlir::triton; + +//===----------------------------------------------------------------------===// +// TritonDialect Dialect Interfaces +//===----------------------------------------------------------------------===// + +namespace { +struct TritonInlinerInterface : public DialectInlinerInterface { + using DialectInlinerInterface::DialectInlinerInterface; + bool isLegalToInline(Region *dest, Region *src, bool wouldBeCloned, + BlockAndValueMapping &valueMapping) const final { + return true; + } + bool isLegalToInline(Operation *, Region *, bool wouldBeCloned, + BlockAndValueMapping &) const final { + return true; + } +}; +} // namespace + +void TritonDialect::initialize() { + registerTypes(); + + addOperations< +#define GET_OP_LIST +#include "triton/Dialect/Triton/IR/Ops.cpp.inc" + >(); + + // We can also add interface here. 
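+  // Only the inliner interface declared above is registered for now; it marks
+  // all Triton ops and regions as legal to inline.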
+ addInterfaces(); +} + +Operation *TritonDialect::materializeConstant(OpBuilder &builder, + Attribute value, Type type, + Location loc) { + return builder.create(loc, type, value); +} \ No newline at end of file diff --git a/lib/Dialect/Triton/IR/Interfaces.cpp b/lib/Dialect/Triton/IR/Interfaces.cpp new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/lib/Dialect/Triton/IR/Ops.cpp b/lib/Dialect/Triton/IR/Ops.cpp new file mode 100644 index 000000000000..cba9e8b6b694 --- /dev/null +++ b/lib/Dialect/Triton/IR/Ops.cpp @@ -0,0 +1,346 @@ +#include "triton/Dialect/Triton/IR/Dialect.h" +#include "triton/Dialect/Triton/IR/Types.h" + +#include "mlir/IR/Builders.h" +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/BuiltinTypes.h" +#include "mlir/IR/OperationSupport.h" + +namespace mlir { +namespace triton { + +// Type inference +static Type getI1SameShape(Type type) { + auto i1Type = IntegerType::get(type.getContext(), 1); + if (auto tensorType = type.dyn_cast()) + return RankedTensorType::get(tensorType.getShape(), i1Type, + tensorType.getEncoding()); + return i1Type; +} + +static Type getI32SameShape(Type type) { + auto i32Type = IntegerType::get(type.getContext(), 32); + if (auto tensorType = type.dyn_cast()) + return RankedTensorType::get(tensorType.getShape(), i32Type, + tensorType.getEncoding()); + return i32Type; +} + +static Type getPointerTypeSameShape(Type type) { + if (auto tensorType = type.dyn_cast()) { + Type elementType = tensorType.getElementType(); + auto shape = tensorType.getShape(); + PointerType ptrType = PointerType::get(elementType, 1); + return RankedTensorType::get(shape, ptrType, tensorType.getEncoding()); + } else { + return PointerType::get(type, 1); + } +} + +// Parser & printer for assembly forms +ParseResult parseLoadOp(OpAsmParser &parser, OperationState &result) { + SmallVector allOperands; + Type resultTypes[1]; + SMLoc allOperandLoc = parser.getCurrentLocation(); + if (parser.parseOperandList(allOperands) || + parser.parseOptionalAttrDict(result.attributes) || parser.parseColon() || + parser.parseCustomTypeWithFallback(resultTypes[0])) + return failure(); + + result.addTypes(resultTypes); + + SmallVector operandTypes; + operandTypes.push_back(getPointerTypeSameShape(resultTypes[0])); // ptr + int hasMask = 0, hasOther = 0; + if (allOperands.size() >= 2) { + operandTypes.push_back(getI1SameShape(resultTypes[0])); // mask + hasMask = 1; + } + if (allOperands.size() >= 3) { + operandTypes.push_back(resultTypes[0]); // other + hasOther = 1; + } + + if (parser.resolveOperands(allOperands, operandTypes, allOperandLoc, + result.operands)) + return failure(); + // Deduce operand_segment_sizes from the number of the operands. + auto operand_segment_sizesAttrName = + LoadOp::operand_segment_sizesAttrName(result.name); + result.addAttribute( + operand_segment_sizesAttrName, + parser.getBuilder().getI32VectorAttr({1, hasMask, hasOther})); + return success(); +} + +void printLoadOp(OpAsmPrinter &printer, LoadOp loadOp) { + printer << " "; + printer << loadOp.getOperation()->getOperands(); + // "operand_segment_sizes" can be deduced, so we don't print it. 
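+  // For example (sketch only, attribute spelling may differ), a masked load
+  // prints roughly as
+  //   tt.load %ptr, %mask, %other {cache = 1 : i32, evict = 1 : i32,
+  //                                isVolatile = false} : tensor<128xf32>
+  // and parseLoadOp above rebuilds the segment sizes {1, 1, 1} from the number
+  // of operands.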
+ printer.printOptionalAttrDict(loadOp->getAttrs(), + {loadOp.operand_segment_sizesAttrName()}); + printer << " : "; + printer.printStrippedAttrOrType(loadOp.result().getType()); +} + +ParseResult parseStoreOp(OpAsmParser &parser, OperationState &result) { + SmallVector allOperands; + Type valueType; + SMLoc allOperandLoc = parser.getCurrentLocation(); + if (parser.parseOperandList(allOperands) || + parser.parseOptionalAttrDict(result.attributes) || parser.parseColon() || + parser.parseCustomTypeWithFallback(valueType)) + return failure(); + + SmallVector operandTypes; + operandTypes.push_back(getPointerTypeSameShape(valueType)); // ptr + operandTypes.push_back(valueType); // value + if (allOperands.size() >= 3) + operandTypes.push_back(getI1SameShape(valueType)); // mask + + if (parser.resolveOperands(allOperands, operandTypes, allOperandLoc, + result.operands)) + return failure(); + return success(); +} + +void printStoreOp(OpAsmPrinter &printer, StoreOp storeOp) { + printer << " "; + printer << storeOp.getOperation()->getOperands(); + printer.printOptionalAttrDict(storeOp->getAttrs(), /*elidedAttrs=*/{}); + printer << " : "; + printer.printStrippedAttrOrType(storeOp.value().getType()); +} + +} // namespace triton +} // namespace mlir + +#define GET_OP_CLASSES +#include "triton/Dialect/Triton/IR/Ops.cpp.inc" + +// enum attribute definitions +#include "triton/Dialect/Triton/IR/OpsEnums.cpp.inc" + +namespace mlir { +namespace triton { + +//-- FpToFpOp -- +bool FpToFpOp::areCastCompatible(::mlir::TypeRange inputs, + ::mlir::TypeRange outputs) { + if (inputs.size() != 1 || outputs.size() != 1) + return false; + auto srcEltType = inputs.front(); + auto dstEltType = outputs.front(); + auto srcTensorType = srcEltType.dyn_cast(); + auto dstTensorType = dstEltType.dyn_cast(); + if (srcTensorType && dstTensorType) { + srcEltType = srcTensorType.getElementType(); + dstEltType = dstTensorType.getElementType(); + } + // Check whether fp8 <=> fp16, bf16, f32, f64 + // Make `srcEltType` always the fp8 side + if (dstEltType.dyn_cast()) + std::swap(srcEltType, dstEltType); + if (!srcEltType.dyn_cast()) + return false; + return dstEltType.isF16() || dstEltType.isBF16() || dstEltType.isF32() || + dstEltType.isF64(); +} + +//-- StoreOp -- +void StoreOp::build(::mlir::OpBuilder &builder, ::mlir::OperationState &state, + ::mlir::Value ptr, ::mlir::Value value) { + StoreOp::build(builder, state, ptr, value, mlir::Value()); +} + +//-- LoadOp -- +static Type getLoadOpResultType(::mlir::OpBuilder &builder, Type ptrType) { + auto ptrTensorType = ptrType.dyn_cast(); + if (!ptrTensorType) + return ptrType.cast().getPointeeType(); + auto shape = ptrTensorType.getShape(); + Type elementType = + ptrTensorType.getElementType().cast().getPointeeType(); + return RankedTensorType::get(shape, elementType); +} + +void LoadOp::build(::mlir::OpBuilder &builder, ::mlir::OperationState &state, + ::mlir::Value ptr, ::mlir::triton::CacheModifier cache, + ::mlir::triton::EvictionPolicy evict, bool isVolatile) { + LoadOp::build(builder, state, ptr, mlir::Value(), mlir::Value(), cache, evict, + isVolatile); +} + +void LoadOp::build(::mlir::OpBuilder &builder, ::mlir::OperationState &state, + ::mlir::Value ptr, ::mlir::Value mask, + ::mlir::triton::CacheModifier cache, + ::mlir::triton::EvictionPolicy evict, bool isVolatile) { + LoadOp::build(builder, state, ptr, mask, mlir::Value(), cache, evict, + isVolatile); +} + +void LoadOp::build(::mlir::OpBuilder &builder, ::mlir::OperationState &state, + ::mlir::Value ptr, ::mlir::Value mask, 
::mlir::Value other, + ::mlir::triton::CacheModifier cache, + ::mlir::triton::EvictionPolicy evict, bool isVolatile) { + Type resultType = getLoadOpResultType(builder, ptr.getType()); + + state.addOperands(ptr); + if (mask) { + state.addOperands(mask); + if (other) { + state.addOperands(other); + } + } + state.addAttribute( + operand_segment_sizesAttrName(state.name), + builder.getI32VectorAttr({1, (mask ? 1 : 0), (other ? 1 : 0)})); + state.addAttribute( + cacheAttrName(state.name), + ::mlir::triton::CacheModifierAttr::get(builder.getContext(), cache)); + state.addAttribute( + evictAttrName(state.name), + ::mlir::triton::EvictionPolicyAttr::get(builder.getContext(), evict)); + state.addAttribute(isVolatileAttrName(state.name), + builder.getBoolAttr(isVolatile)); + state.addTypes({resultType}); +} + +//-- DotOp -- +mlir::LogicalResult mlir::triton::DotOp::inferReturnTypes( + MLIRContext *context, Optional location, ValueRange operands, + DictionaryAttr attributes, RegionRange regions, + SmallVectorImpl &inferredReturnTypes) { + // type is the same as the accumulator + auto accTy = operands[2].getType().cast(); + inferredReturnTypes.push_back(accTy); + + // verify encodings + auto aEnc = operands[0].getType().cast().getEncoding(); + auto bEnc = operands[1].getType().cast().getEncoding(); + auto retEnc = accTy.getEncoding(); + if (aEnc) { + assert(bEnc); + Dialect &dialect = aEnc.getDialect(); + auto interface = dyn_cast(&dialect); + if (interface->inferDotOpEncoding(aEnc, 0, retEnc, location).failed()) + return mlir::failure(); + if (interface->inferDotOpEncoding(bEnc, 1, retEnc, location).failed()) + return mlir::failure(); + } + return mlir::success(); +} + +//-- ReduceOp -- +mlir::LogicalResult mlir::triton::ReduceOp::inferReturnTypes( + MLIRContext *context, Optional location, ValueRange operands, + DictionaryAttr attributes, RegionRange regions, + SmallVectorImpl &inferredReturnTypes) { + // infer shape + Value arg = operands[0]; + auto argTy = arg.getType().cast(); + auto argEltTy = argTy.getElementType(); + auto i32Ty = IntegerType::get(argEltTy.getContext(), 32); + auto redOp = + attributes.get("redOp").cast().getValue(); + bool withIndex = mlir::triton::ReduceOp::withIndex(redOp); + auto retEltTy = withIndex ? 
i32Ty : argEltTy; + auto retShape = argTy.getShape().vec(); + int axis = attributes.get("axis").cast().getInt(); + retShape.erase(retShape.begin() + axis); + if (retShape.empty()) { + // 0d-tensor -> scalar + inferredReturnTypes.push_back(retEltTy); + } else { + // nd-tensor where n >= 1 + // infer encoding + Attribute argEncoding = argTy.getEncoding(); + Attribute retEncoding; + if (argEncoding) { + Dialect &dialect = argEncoding.getDialect(); + auto inferLayoutInterface = + dyn_cast(&dialect); + if (inferLayoutInterface + ->inferReduceOpEncoding(argEncoding, axis, retEncoding) + .failed()) { + llvm::report_fatal_error("failed to infer layout for ReduceOp"); + return mlir::failure(); + } + } + // create type + inferredReturnTypes.push_back( + RankedTensorType::get(retShape, retEltTy, retEncoding)); + } + return mlir::success(); +} + +bool mlir::triton::ReduceOp::withIndex(mlir::triton::RedOp redOp) { + return redOp == mlir::triton::RedOp::ARGMIN || + redOp == mlir::triton::RedOp::ARGMAX || + redOp == mlir::triton::RedOp::ARGUMIN || + redOp == mlir::triton::RedOp::ARGUMAX || + redOp == mlir::triton::RedOp::ARGFMIN || + redOp == mlir::triton::RedOp::ARGFMAX; +} + +//-- SplatOp -- +OpFoldResult SplatOp::fold(ArrayRef operands) { + auto constOperand = src().getDefiningOp(); + if (!constOperand) + return {}; + auto shapedType = getType().cast(); + auto ret = SplatElementsAttr::get(shapedType, {constOperand.getValue()}); + return ret; +} + +//-- ExpandDimsOp -- +mlir::LogicalResult mlir::triton::ExpandDimsOp::inferReturnTypes( + MLIRContext *context, Optional loc, ValueRange operands, + DictionaryAttr attributes, RegionRange regions, + SmallVectorImpl &inferredReturnTypes) { + // infer shape + auto arg = operands[0]; + auto argTy = arg.getType().cast(); + auto retShape = argTy.getShape().vec(); + int axis = attributes.get("axis").cast().getInt(); + retShape.insert(retShape.begin() + axis, 1); + // infer encoding + Attribute argEncoding = argTy.getEncoding(); + Attribute retEncoding; + if (argEncoding) { + Dialect &dialect = argEncoding.getDialect(); + auto inferLayoutInterface = dyn_cast(&dialect); + if (inferLayoutInterface + ->inferExpandDimsOpEncoding(argEncoding, axis, retEncoding, loc) + .failed()) + return emitOptionalError(loc, "failed to infer layout for ExpandDimsOp"); + } + // create type + auto argEltTy = argTy.getElementType(); + inferredReturnTypes.push_back( + RankedTensorType::get(retShape, argEltTy, retEncoding)); + return mlir::success(); +} + +//-- BroadcastOp -- +OpFoldResult BroadcastOp::fold(ArrayRef operands) { + auto constOperand = src().getDefiningOp(); + if (!constOperand) + return {}; + + auto shapedType = getType().cast(); + auto value = constOperand.getValue(); + if (auto denseElemsAttr = value.dyn_cast()) { + if (!denseElemsAttr.isSplat()) + return {}; + return SplatElementsAttr::get(shapedType, + denseElemsAttr.getSplatValue()); + } else if (value.getType().isIntOrIndexOrFloat()) { + return SplatElementsAttr::get(shapedType, value); + } else { + return {}; + } +} + +} // namespace triton +} // namespace mlir diff --git a/lib/Dialect/Triton/IR/Traits.cpp b/lib/Dialect/Triton/IR/Traits.cpp new file mode 100644 index 000000000000..eede3f4069f3 --- /dev/null +++ b/lib/Dialect/Triton/IR/Traits.cpp @@ -0,0 +1,71 @@ +#include "triton/Dialect/Triton/IR/Traits.h" + +static mlir::LogicalResult verifySameEncoding(mlir::Type tyA, mlir::Type tyB) { + using namespace mlir; + auto encA = tyA.dyn_cast(); + auto encB = tyA.dyn_cast(); + if (!encA || !encB) + return success(); + 
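+  // At this point both types are expected to be ranked tensors, so their
+  // encodings are compared directly.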
return encA.getEncoding() == encB.getEncoding() ? success() : failure(); +} + +mlir::LogicalResult +mlir::OpTrait::impl::verifySameOperandsAndResultEncoding(Operation *op) { + if (failed(verifyAtLeastNOperands(op, 1)) || + failed(verifyAtLeastNResults(op, 1))) + return failure(); + + auto type = op->getOperand(0).getType(); + for (auto resultType : op->getResultTypes()) + if (failed(verifySameEncoding(resultType, type))) + return op->emitOpError() + << "requires the same encoding for all operands and results"; + return verifySameOperandsEncoding(op); +} + +mlir::LogicalResult +mlir::OpTrait::impl::verifySameOperandsEncoding(Operation *op) { + if (failed(verifyAtLeastNOperands(op, 1))) + return failure(); + + auto type = op->getOperand(0).getType(); + for (auto opType : llvm::drop_begin(op->getOperandTypes(), 1)) + if (failed(verifySameEncoding(opType, type))) + return op->emitOpError() << "requires the same encoding for all operands"; + + return success(); +} + +mlir::LogicalResult mlir::OpTrait::impl::verifyTensorSize(Operation *op) { + for (auto opType : op->getOperandTypes()) { + if (auto tensorType = opType.dyn_cast()) { + int64_t numElements = 1; + for (int64_t s : tensorType.getShape()) + numElements *= s; + if (numElements > maxTensorNumElements) + return op->emitError("Maximum allowed number of elements is ") + << maxTensorNumElements << ", but " << *op + << " has more than that"; + if ((numElements & (numElements - 1)) != 0) + return op->emitError("Number of elements must be power-of-two, but ") + << *op << " doesn't follow the rule (" << numElements << ")" + << " elements"; + } + } + for (auto opType : op->getResultTypes()) { + if (auto tensorType = opType.dyn_cast()) { + int64_t numElements = 1; + for (int64_t s : tensorType.getShape()) + numElements *= s; + if (numElements > maxTensorNumElements) + return op->emitError("Maximum allowed number of elements is ") + << maxTensorNumElements << ", but " << *op + << " has more than that"; + if ((numElements & (numElements - 1)) != 0) + return op->emitError("Number of elements must be power-of-two, but ") + << *op << " doesn't follow the rule (" << numElements << ")" + << " elements"; + } + } + return success(); +} diff --git a/lib/Dialect/Triton/IR/Types.cpp b/lib/Dialect/Triton/IR/Types.cpp new file mode 100644 index 000000000000..5884a2ec46ce --- /dev/null +++ b/lib/Dialect/Triton/IR/Types.cpp @@ -0,0 +1,39 @@ +#include "triton/Dialect/Triton/IR/Types.h" +#include "mlir/IR/DialectImplementation.h" // required by `Types.cpp.inc` +#include "triton/Dialect/Triton/IR/Dialect.h" +#include "llvm/ADT/TypeSwitch.h" // required by `Types.cpp.inc` + +using namespace mlir; +using namespace mlir::triton; + +#define GET_TYPEDEF_CLASSES +#include "triton/Dialect/Triton/IR/Types.cpp.inc" + +//===----------------------------------------------------------------------===// +// Triton Dialect +//===----------------------------------------------------------------------===// +void TritonDialect::registerTypes() { + addTypes< +#define GET_TYPEDEF_LIST +#include "triton/Dialect/Triton/IR/Types.cpp.inc" + >(); +} + +Type PointerType::parse(AsmParser &parser) { + if (parser.parseLess()) + return Type(); + + Type pointeeType; + if (parser.parseType(pointeeType)) + return Type(); + + if (parser.parseGreater()) + return Type(); + + // TODO: also print address space? 
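+  // Address space 1 is used as the default since no address space is parsed yet.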
+ return PointerType::get(pointeeType, 1); +} + +void PointerType::print(AsmPrinter &printer) const { + printer << "<" << getPointeeType() << ">"; +} diff --git a/lib/Dialect/Triton/Transforms/CMakeLists.txt b/lib/Dialect/Triton/Transforms/CMakeLists.txt new file mode 100644 index 000000000000..072b23c6fb4a --- /dev/null +++ b/lib/Dialect/Triton/Transforms/CMakeLists.txt @@ -0,0 +1,11 @@ +set(LLVM_TARGET_DEFINITIONS Combine.td) +mlir_tablegen(TritonCombine.inc -gen-rewriters) +add_public_tablegen_target(TritonCombineIncGen) + +add_mlir_dialect_library(TritonTransforms + Combine.cpp + + DEPENDS + TritonTransformsIncGen + TritonCombineIncGen +) diff --git a/lib/Dialect/Triton/Transforms/Combine.cpp b/lib/Dialect/Triton/Transforms/Combine.cpp new file mode 100644 index 000000000000..2b72c14a42a2 --- /dev/null +++ b/lib/Dialect/Triton/Transforms/Combine.cpp @@ -0,0 +1,209 @@ +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/Matchers.h" +#include "mlir/IR/PatternMatch.h" +#include "mlir/Pass/Pass.h" +#include "mlir/Support/LogicalResult.h" +#include "mlir/Transforms/GreedyPatternRewriteDriver.h" + +#include "triton/Dialect/Triton/IR/Dialect.h" +#include "triton/Dialect/Triton/Transforms/Passes.h" + +#include + +using namespace mlir; + +namespace { + +bool isZero(mlir::Value val) { + if (mlir::matchPattern(val, mlir::m_Zero()) || + mlir::matchPattern(val, mlir::m_AnyZeroFloat())) + return true; + // broadcast(constant_0) + if (auto bc = val.getDefiningOp()) { + if (mlir::matchPattern(bc.src(), mlir::m_Zero()) || + mlir::matchPattern(bc.src(), mlir::m_AnyZeroFloat())) + return true; + } + return false; +} + +bool isBroadcastConstantCombinable(Attribute value) { + if (auto denseValue = value.dyn_cast()) { + return denseValue.isSplat(); + } + return value.isa(); +} + +DenseElementsAttr getConstantValue(Builder &builder, Attribute value, + Value bcast_res) { + + Type resType = bcast_res.getType(); + DenseElementsAttr res; + if (auto denseValue = value.dyn_cast()) { + res = + DenseElementsAttr::get(resType, denseValue.getSplatValue()); + } else { + res = DenseElementsAttr::get(resType, value); + } + return res; +} + +#include "TritonCombine.inc" + +} // anonymous namespace + +// select(cond, load(ptrs, broadcast(cond), ???), other) +// => load(ptrs, broadcast(cond), other) +class CombineSelectMaskedLoadPattern : public mlir::RewritePattern { +public: + CombineSelectMaskedLoadPattern(mlir::MLIRContext *context) + : mlir::RewritePattern(mlir::SelectOp::getOperationName(), 3, context, + {triton::LoadOp::getOperationName()}) {} + + mlir::LogicalResult + matchAndRewrite(mlir::Operation *op, + mlir::PatternRewriter &rewriter) const override { + auto selectOp = llvm::dyn_cast(op); + if (!selectOp) + return mlir::failure(); + + mlir::Value trueValue = selectOp.getTrueValue(); + mlir::Value falseValue = selectOp.getFalseValue(); + + auto *loadOpCandidate = trueValue.getDefiningOp(); + auto loadOp = llvm::dyn_cast_or_null(loadOpCandidate); + if (!loadOp) + return mlir::failure(); + + mlir::Value mask = loadOp.mask(); + if (!mask) + return mlir::failure(); + + auto *broadcastOpCandidate = mask.getDefiningOp(); + auto broadcastOp = + llvm::dyn_cast_or_null(broadcastOpCandidate); + if (!broadcastOp) + return mlir::failure(); + + rewriter.replaceOpWithNewOp( + op, loadOp.ptr(), loadOp.mask(), falseValue, loadOp.cache(), + loadOp.evict(), loadOp.isVolatile()); + return mlir::success(); + } +}; + +// load(ptr, splat(1), ...) -> load(ptr, ...) +// load(ptr, splat(0), other, ...) 
-> other +struct CanonicalizeMaskedLoadPattern + : public mlir::OpRewritePattern { + CanonicalizeMaskedLoadPattern(mlir::MLIRContext *context) + : OpRewritePattern(context, 1) {} + + mlir::LogicalResult + matchAndRewrite(triton::LoadOp loadOp, + mlir::PatternRewriter &rewriter) const override { + auto mask = loadOp.mask(); + if (!mask) + return mlir::failure(); + + auto constantMask = + llvm::dyn_cast_or_null(mask.getDefiningOp()); + if (!constantMask) + return mlir::failure(); + + auto splatMask = constantMask.getValue().dyn_cast(); + if (!splatMask) + return mlir::failure(); + + if (splatMask.getSplatValue().getValue() == true) { + // mask = splat(1) + rewriter.replaceOpWithNewOp( + loadOp, loadOp.getType(), loadOp.ptr(), Value(), Value(), + loadOp.cache(), loadOp.evict(), loadOp.isVolatile()); + } else { + // mask = splat(0) + + // If there's no "other", the value is "undef". Perhaps we want to + // optimize it in the future.x + auto otherVal = loadOp.other(); + if (!otherVal) + return mlir::failure(); + rewriter.replaceOp(loadOp, otherVal); + } + return mlir::success(); + } +}; + +void triton::LoadOp::getCanonicalizationPatterns(RewritePatternSet &results, + MLIRContext *context) { + results.add(context); +} + +// store(ptr, value, splat(1), ...) -> store(ptr, value, ...) +// store(ptr, value, splat(0), ...) -> [none] +struct CanonicalizeMaskedStorePattern + : public mlir::OpRewritePattern { + CanonicalizeMaskedStorePattern(mlir::MLIRContext *context) + : OpRewritePattern(context, 1) {} + + mlir::LogicalResult + matchAndRewrite(triton::StoreOp storeOp, + mlir::PatternRewriter &rewriter) const override { + auto mask = storeOp.mask(); + if (!mask) + return mlir::failure(); + + auto constantMask = + llvm::dyn_cast_or_null(mask.getDefiningOp()); + if (!constantMask) + return mlir::failure(); + + auto splatMask = constantMask.getValue().dyn_cast(); + if (!splatMask) + return mlir::failure(); + + if (splatMask.getSplatValue().getValue() == true) { + // mask = splat(1) + rewriter.replaceOpWithNewOp(storeOp, storeOp.ptr(), + storeOp.value()); + } else { + // mask = splat(0) + rewriter.eraseOp(storeOp); + } + return mlir::success(); + } +}; + +void triton::StoreOp::getCanonicalizationPatterns(RewritePatternSet &results, + MLIRContext *context) { + results.add(context); +} + +#define GEN_PASS_CLASSES +#include "triton/Dialect/Triton/Transforms/Passes.h.inc" + +class CombineOpsPass : public TritonCombineOpsBase { +public: + void runOnOperation() override { + mlir::MLIRContext *context = &getContext(); + mlir::RewritePatternSet patterns(context); + mlir::ModuleOp m = getOperation(); + + // Dot Add %{ + patterns.add(context); + patterns.add(context); + patterns.add(context); + patterns.add(context); + // %} + patterns.add(context); + // patterns.add(context); + patterns.add(context); + + if (applyPatternsAndFoldGreedily(m, std::move(patterns)).failed()) + signalPassFailure(); + } +}; + +std::unique_ptr mlir::triton::createCombineOpsPass() { + return std::make_unique(); +} diff --git a/lib/Dialect/Triton/Transforms/Combine.td b/lib/Dialect/Triton/Transforms/Combine.td new file mode 100644 index 000000000000..14f286b26e0a --- /dev/null +++ b/lib/Dialect/Triton/Transforms/Combine.td @@ -0,0 +1,48 @@ +#ifndef TRITON_PATTERNS +#define TRITON_PATTERNS + +include "mlir/Dialect/StandardOps/IR/Ops.td" +include "mlir/Dialect/Arithmetic/IR/ArithmeticOps.td" +include "triton/Dialect/Triton/IR/TritonOps.td" + + +// AddIOp(DotOp(a, b, c), d) and c==0 => DotOp(a, b, d) +// AddFOp(DotOp(a, b, c), d) and c==0 => 
DotOp(a, b, d) + +// AddIOp(d, DotOp(a, b, c)) and c==0 => DotOp(a, b, d) +// AddFOp(d, DotOp(a, b, c)) and c==0 => DotOp(a, b, d) +def CombineDotAddIPattern : Pat< + (Arith_AddIOp $d, (TT_DotOp:$res $a, $b, $c, $allowTF32)), + (TT_DotOp $a, $b, $d, $allowTF32), + [(Constraint> $c)]>; +def CombineDotAddFPattern : Pat< + (Arith_AddFOp $d, (TT_DotOp:$res $a, $b, $c, $allowTF32)), + (TT_DotOp $a, $b, $d, $allowTF32), + [(Constraint> $c)]>; + +def CombineDotAddIRevPattern : Pat< + (Arith_AddIOp (TT_DotOp:$res $a, $b, $c, $allowTF32), $d), + (TT_DotOp $a, $b, $d, $allowTF32), + [(Constraint> $c)]>; +def CombineDotAddFRevPattern : Pat< + (Arith_AddFOp (TT_DotOp:$res $a, $b, $c, $allowTF32), $d), + (TT_DotOp $a, $b, $d, $allowTF32), + [(Constraint> $c)]>; + +// TODO: this fails for addptr(addptr(ptr, i32), i64) +// Commented out until fixed +// addptr(addptr(%ptr, %idx0), %idx1) => addptr(%ptr, AddI(%idx0, %idx1)) +// Note: leave (sub %c0, %c0) canceling to ArithmeticDialect +// (ref: ArithmeticCanonicalization.td) +// def CombineAddPtrPattern : Pat< +// (TT_AddPtrOp (TT_AddPtrOp $ptr, $idx0), $idx1), +// (TT_AddPtrOp $ptr, (Arith_AddIOp $idx0, $idx1))>; + +// broadcast(cst) => cst +def getConstantValue : NativeCodeCall<"getConstantValue($_builder, $0, $1)">; +def CombineBroadcastConstantPattern : Pat< + (TT_BroadcastOp:$bcast_res (Arith_ConstantOp $value)), + (Arith_ConstantOp (getConstantValue $value, $bcast_res)), + [(Constraint> $value)]>; + +#endif diff --git a/lib/Dialect/TritonGPU/CMakeLists.txt b/lib/Dialect/TritonGPU/CMakeLists.txt new file mode 100644 index 000000000000..9f57627c321f --- /dev/null +++ b/lib/Dialect/TritonGPU/CMakeLists.txt @@ -0,0 +1,2 @@ +add_subdirectory(IR) +add_subdirectory(Transforms) diff --git a/lib/Dialect/TritonGPU/IR/CMakeLists.txt b/lib/Dialect/TritonGPU/IR/CMakeLists.txt new file mode 100644 index 000000000000..903dfc318467 --- /dev/null +++ b/lib/Dialect/TritonGPU/IR/CMakeLists.txt @@ -0,0 +1,11 @@ +add_mlir_dialect_library(TritonGPUIR + Dialect.cpp + Traits.cpp + + DEPENDS + TritonGPUTableGen + TritonGPUAttrDefsIncGen + + LINK_LIBS PUBLIC + TritonIR +) diff --git a/lib/Dialect/TritonGPU/IR/Dialect.cpp b/lib/Dialect/TritonGPU/IR/Dialect.cpp new file mode 100644 index 000000000000..d671f377d629 --- /dev/null +++ b/lib/Dialect/TritonGPU/IR/Dialect.cpp @@ -0,0 +1,783 @@ +#include + +#include "mlir/IR/DialectImplementation.h" +#include "mlir/IR/OpImplementation.h" +#include "triton/Analysis/Utility.h" +#include "triton/Dialect/Triton/IR/Dialect.h" +#include "triton/Dialect/TritonGPU/IR/Dialect.h" +#include "llvm/ADT/TypeSwitch.h" + +#include "triton/Dialect/TritonGPU/IR/Dialect.cpp.inc" + +using namespace mlir; +using namespace mlir::triton::gpu; + +// Utility +namespace mlir { +namespace triton { + +// Type inference +static Type getI1SameShape(Type type) { + auto i1Type = IntegerType::get(type.getContext(), 1); + if (auto tensorType = type.dyn_cast()) + return RankedTensorType::get(tensorType.getShape(), i1Type, + tensorType.getEncoding()); + return Type(); +} + +static Type getPointeeType(Type type) { + if (auto tensorType = type.dyn_cast()) { + // Tensor of pointers + auto shape = tensorType.getShape(); + auto ptrType = tensorType.getElementType().dyn_cast(); + Type pointeeType = ptrType.getPointeeType(); + return RankedTensorType::get(shape, pointeeType, tensorType.getEncoding()); + } else if (auto ptrType = type.dyn_cast()) { + // scalar pointer + Type pointeeType = ptrType.getPointeeType(); + return pointeeType; + } + return Type(); +} + +namespace gpu { 
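+
+// Helpers that answer layout queries (elements per thread, threads per warp,
+// warps per CTA, shape per CTA, order) for each TritonGPU encoding attribute.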
+ +// TODO: Inheritance of layout attributes +// so that all distributed layouts implement +// these utilities + +unsigned getElemsPerThread(Attribute layout, ArrayRef shape) { + if (auto blockedLayout = layout.dyn_cast()) { + return blockedLayout.getElemsPerThread(shape); + } else if (auto sliceLayout = layout.dyn_cast()) { + return sliceLayout.getElemsPerThread(shape); + } else if (auto mmaLayout = layout.dyn_cast()) { + return mmaLayout.getElemsPerThread(shape); + } else if (auto sharedLayout = layout.dyn_cast()) { + return sharedLayout.getElemsPerThread(shape); + } else if (auto dotLayout = layout.dyn_cast()) { + return dotLayout.getElemsPerThread(shape); + } else { + assert(0 && "getElemsPerThread not implemented"); + return 0; + } +} + +unsigned getElemsPerThread(Type type) { + if (type.isIntOrIndexOrFloat() || type.isa() || + type.isa()) + return 1; + auto tensorType = type.cast(); + return getElemsPerThread(tensorType.getEncoding(), tensorType.getShape()); +} + +SmallVector getThreadsPerWarp(const Attribute &layout) { + if (auto blockedLayout = layout.dyn_cast()) { + return SmallVector(blockedLayout.getThreadsPerWarp().begin(), + blockedLayout.getThreadsPerWarp().end()); + } + if (auto mmaLayout = layout.dyn_cast()) { + if (mmaLayout.isVolta()) + return {4, 8}; + if (mmaLayout.isAmpere()) + return {8, 4}; + } + assert(0 && "getThreadsPerWarp not implemented"); + return {}; +} + +SmallVector getWarpsPerCTA(const Attribute &layout) { + if (auto blockedLayout = layout.dyn_cast()) { + return SmallVector(blockedLayout.getWarpsPerCTA().begin(), + blockedLayout.getWarpsPerCTA().end()); + } + if (auto mmaLayout = layout.dyn_cast()) { + return SmallVector(mmaLayout.getWarpsPerCTA().begin(), + mmaLayout.getWarpsPerCTA().end()); + } + assert(0 && "getWarpsPerCTA not implemented"); + return {}; +} + +SmallVector getSizePerThread(const Attribute &layout) { + if (auto blockedLayout = layout.dyn_cast()) { + return SmallVector(blockedLayout.getSizePerThread().begin(), + blockedLayout.getSizePerThread().end()); + } else if (auto sliceLayout = layout.dyn_cast()) { + return getSizePerThread(sliceLayout.getParent()); + } else if (auto mmaLayout = layout.dyn_cast()) { + if (mmaLayout.isAmpere()) { + return {2, 2}; + } else if (mmaLayout.isVolta()) { + // Note: here the definition of sizePerThread is obscure, which doesn't + // mean vecSize=4 can be supported in the last dimension. 
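+      // Volta (mma v1) case: sizePerThread is fixed to {2, 4}.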
+ return {2, 4}; + } else { + llvm_unreachable("Unexpected mma version"); + } + } else if (auto dotLayout = layout.dyn_cast()) { + auto parentLayout = dotLayout.getParent(); + assert(parentLayout && "DotOperandEncodingAttr must have a parent"); + if (auto parentMmaLayout = parentLayout.dyn_cast()) { + assert(parentMmaLayout.isAmpere() && + "mmaLayout version = 1 is not implemented yet"); + auto parentShapePerCTA = getShapePerCTA(parentLayout); + auto opIdx = dotLayout.getOpIdx(); + if (opIdx == 0) { + return {2, 4}; + } else if (opIdx == 1) { + return {4, 1}; + } else { + assert(0 && "DotOperandEncodingAttr opIdx must be 0 or 1"); + return {}; + } + } else { + assert(0 && "DotOperandEncodingAttr non-MmaEncodingAttr parent not " + "supported yet"); + return {}; + } + } else { + assert(0 && "getSizePerThread not implemented"); + return {}; + } +} + +SmallVector getContigPerThread(Attribute layout) { + if (auto mmaLayout = layout.dyn_cast()) { + assert(mmaLayout.isVolta() || mmaLayout.isAmpere()); + return {1, 2}; + } else { + return getSizePerThread(layout); + } +} + +SmallVector getThreadsPerCTA(const Attribute &layout) { + SmallVector threads; + if (auto blockedLayout = layout.dyn_cast()) { + for (int d = 0, n = blockedLayout.getOrder().size(); d < n; ++d) + threads.push_back(blockedLayout.getThreadsPerWarp()[d] * + blockedLayout.getWarpsPerCTA()[d]); + } else if (auto mmaLayout = layout.dyn_cast()) { + assert(0 && "Unimplemented usage of MmaEncodingAttr"); + } else { + assert(0 && "Unimplemented usage of getShapePerCTA"); + } + + return threads; +} + +SmallVector getShapePerCTA(const Attribute &layout) { + SmallVector shape; + if (auto blockedLayout = layout.dyn_cast()) { + for (unsigned d = 0, n = blockedLayout.getOrder().size(); d < n; ++d) + shape.push_back(blockedLayout.getSizePerThread()[d] * + blockedLayout.getThreadsPerWarp()[d] * + blockedLayout.getWarpsPerCTA()[d]); + } else if (auto sliceLayout = layout.dyn_cast()) { + unsigned dim = sliceLayout.getDim(); + auto parent = sliceLayout.getParent(); + for (unsigned d = 0, n = getOrder(parent).size(); d < n; ++d) { + if (d == dim) + continue; + shape.push_back(getShapePerCTA(parent)[d]); + } + } else if (auto mmaLayout = layout.dyn_cast()) { + if (mmaLayout.isAmpere()) + return {16 * mmaLayout.getWarpsPerCTA()[0], + 8 * mmaLayout.getWarpsPerCTA()[1]}; + if (mmaLayout.isVolta()) + return {16 * mmaLayout.getWarpsPerCTA()[0], + 16 * mmaLayout.getWarpsPerCTA()[1]}; + assert(0 && "Unexpected MMA layout version found"); + } else if (auto dotLayout = layout.dyn_cast()) { + auto parentLayout = dotLayout.getParent(); + assert(parentLayout && "DotOperandEncodingAttr must have a parent"); + if (auto parentMmaLayout = parentLayout.dyn_cast()) { + assert(parentMmaLayout.isAmpere() && + "mmaLayout version = 1 is not implemented yet"); + auto parentShapePerCTA = getShapePerCTA(parentLayout); + auto opIdx = dotLayout.getOpIdx(); + if (opIdx == 0) { + return {parentShapePerCTA[0], 16}; + } else if (opIdx == 1) { + return {16, parentShapePerCTA[1]}; + } else { + assert(0 && "DotOperandEncodingAttr opIdx must be 0 or 1"); + } + } else { + assert(0 && "DotOperandEncodingAttr non-MmaEncodingAttr parent not " + "supported yet"); + } + } else if (auto mmaLayout = layout.dyn_cast()) { + if (mmaLayout.isAmpere()) { + return {16 * mmaLayout.getWarpsPerCTA()[0], + 8 * mmaLayout.getWarpsPerCTA()[1]}; + } else if (mmaLayout.isVolta()) { + return {16 * mmaLayout.getWarpsPerCTA()[0], + 16 * mmaLayout.getWarpsPerCTA()[1]}; + } else { + llvm_unreachable("Unexpected 
mma version"); + } + } else { + assert(0 && "Unimplemented usage of getShapePerCTA"); + } + return shape; +} + +SmallVector getOrder(const Attribute &layout) { + if (auto blockedLayout = layout.dyn_cast()) { + return SmallVector(blockedLayout.getOrder().begin(), + blockedLayout.getOrder().end()); + } else if (auto mmaLayout = layout.dyn_cast()) { + return {1, 0}; + } else if (auto dotLayout = layout.dyn_cast()) { + return {1, 0}; + } else if (auto sliceLayout = layout.dyn_cast()) { + SmallVector parentOrder = getOrder(sliceLayout.getParent()); + unsigned dim = sliceLayout.getDim(); + SmallVector order; + for (unsigned d : parentOrder) { + if (d == dim) + continue; + else if (d > dim) + order.push_back(d - 1); + else + order.push_back(d); + } + return order; + } else if (auto sharedLayout = layout.dyn_cast()) { + return SmallVector(sharedLayout.getOrder().begin(), + sharedLayout.getOrder().end()); + } else { + assert(0 && "Unimplemented usage of getOrder"); + return {}; + } +}; + +} // namespace gpu +} // namespace triton +} // namespace mlir + +static LogicalResult parseIntAttrValue(AsmParser &parser, const Attribute &attr, + unsigned &value, StringRef desc) { + auto intAttr = attr.dyn_cast(); + if (!intAttr) { + parser.emitError(parser.getNameLoc(), "expected an integer type in ") + << desc; + return failure(); + } + if (intAttr.getType().isSignedInteger()) { + int64_t attrVal = intAttr.getSInt(); + if (attrVal < 0) { + parser.emitError(parser.getNameLoc(), + "expected an unsigned integer value in ") + << desc; + return failure(); + } + value = attrVal; + } else if (intAttr.getType().isSignlessInteger()) { + int64_t attrVal = intAttr.getInt(); + if (attrVal < 0) { + parser.emitError(parser.getNameLoc(), + "expected an unsigned integer value in ") + << desc; + return failure(); + } + value = attrVal; + } else { + value = intAttr.getUInt(); + } + return success(); +} + +// parse an array of integers +static LogicalResult parseIntArrayAttr(AsmParser &parser, + const NamedAttribute &attr, + SmallVector &res, + StringRef desc) { + auto arrayAttr = attr.getValue().dyn_cast(); + if (!arrayAttr) { + parser.emitError(parser.getNameLoc(), "expected an array for ") << desc; + return failure(); + } + for (Attribute i : arrayAttr) { + unsigned value; + if (parseIntAttrValue(parser, i, value, desc).failed()) + return failure(); + res.push_back(value); + } + return success(); +}; + +static LogicalResult parseUInt(AsmParser &parser, const NamedAttribute &attr, + unsigned &value, StringRef desc) { + return parseIntAttrValue(parser, attr.getValue(), value, desc); +}; + +//===----------------------------------------------------------------------===// +// Attribute methods +//===----------------------------------------------------------------------===// +#define GET_ATTRDEF_CLASSES +#include "triton/Dialect/TritonGPU/IR/TritonGPUAttrDefs.cpp.inc" + +SliceEncodingAttr BlockedEncodingAttr::squeeze(int axis) { + return SliceEncodingAttr::get(getContext(), axis, *this); +} + +unsigned BlockedEncodingAttr::getElemsPerThread(ArrayRef shape) const { + size_t rank = shape.size(); + auto sizePerThread = getSizePerThread(); + auto warpsPerCTA = getWarpsPerCTA(); + auto threadsPerWarp = getThreadsPerWarp(); + assert(rank == sizePerThread.size() && + "unexpected rank in BlockedEncodingAttr::getElemsPerThread"); + SmallVector elemsPerThread(rank); + for (size_t i = 0; i < rank; ++i) { + unsigned t = sizePerThread[i] * threadsPerWarp[i] * warpsPerCTA[i]; + elemsPerThread[i] = ceil(shape[i], t) * sizePerThread[i]; + } + 
return product(elemsPerThread); +} + +template +SmallVector SliceEncodingAttr::paddedShape(ArrayRef shape) const { + size_t rank = shape.size(); + unsigned dim = getDim(); + SmallVector retShape(rank + 1); + for (unsigned d = 0; d < rank + 1; ++d) { + if (d < dim) + retShape[d] = shape[d]; + else if (d == dim) + retShape[d] = 1; + else + retShape[d] = shape[d - 1]; + } + return retShape; +} +template SmallVector +SliceEncodingAttr::paddedShape(ArrayRef shape) const; +template SmallVector +SliceEncodingAttr::paddedShape(ArrayRef shape) const; + +unsigned SliceEncodingAttr::getElemsPerThread(ArrayRef shape) const { + size_t rank = shape.size(); + auto parent = getParent(); + return ::getElemsPerThread(parent, paddedShape(shape)); +} + +unsigned MmaEncodingAttr::getElemsPerThread(ArrayRef shape) const { + size_t rank = shape.size(); + assert(rank == 2 && "Unexpected rank of mma layout"); + assert((isVolta() || isAmpere()) && "Only version 1 and 2 is supported"); + + int res = 0; + if (isVolta()) { + unsigned mmasRow = ceil(shape[0], 16 * getWarpsPerCTA()[0]); + unsigned mmasCol = ceil(shape[1], 16 * getWarpsPerCTA()[1]); + // Each warp-level mma884 will perform a m16xn16xk4 mma, thus get a m16xn16 + // matrix as result. + res = mmasRow * mmasCol * (16 * 16 / 32); + } else if (isAmpere()) { + unsigned elemsCol = ceil(shape[0], 16 * getWarpsPerCTA()[0]) * 2; + unsigned elemsRow = ceil(shape[1], 8 * getWarpsPerCTA()[1]) * 2; + res = elemsCol * elemsRow; + } else { + llvm_unreachable("Unexpected mma version"); + } + + return res; +} + +unsigned SharedEncodingAttr::getElemsPerThread(ArrayRef shape) const { + // TODO: + assert(0 && "SharedEncodingAttr::getElemsPerThread not implemented"); + return 0; +} + +unsigned +DotOperandEncodingAttr::getElemsPerThread(ArrayRef shape) const { + if (auto blockedLayout = getParent().dyn_cast()) { + return blockedLayout.getElemsPerThread(shape); + } + assert(0 && "DotOperandEncodingAttr::getElemsPerThread not implemented"); + return 0; +} + +//===----------------------------------------------------------------------===// +// Blocked Encoding +//===----------------------------------------------------------------------===// + +Attribute BlockedEncodingAttr::parse(AsmParser &parser, Type type) { + if (parser.parseLess().failed()) + return {}; + // Parse the data as a dictionary + DictionaryAttr dict; + if (parser.parseAttribute(dict).failed()) + return {}; + if (parser.parseGreater().failed()) + return {}; + + SmallVector sizePerThread; + SmallVector threadsPerWarp; + SmallVector warpsPerCTA; + SmallVector order; + + for (const NamedAttribute &attr : dict) { + if (attr.getName() == "sizePerThread") { + if (parseIntArrayAttr(parser, attr, sizePerThread, + "number of elements per thread") + .failed()) + return {}; + } else if (attr.getName() == "threadsPerWarp") { + if (parseIntArrayAttr(parser, attr, threadsPerWarp, + "number of threads per warp") + .failed()) + return {}; + } else if (attr.getName() == "warpsPerCTA") { + if (parseIntArrayAttr(parser, attr, warpsPerCTA, + "number of warps per CTA") + .failed()) + return {}; + } else if (attr.getName() == "order") { + if (parseIntArrayAttr(parser, attr, order, "order").failed()) + return {}; + } else { + parser.emitError(parser.getNameLoc(), "unexpected key: ") + << attr.getName().strref(); + return {}; + } + } + + auto ret = parser.getChecked( + parser.getContext(), sizePerThread, threadsPerWarp, warpsPerCTA, order); + return ret; +} + +void BlockedEncodingAttr::print(mlir::AsmPrinter &printer) const { + printer << 
"<{" + << "sizePerThread = [" << getSizePerThread() << "]" + << ", threadsPerWarp = [" << getThreadsPerWarp() << "]" + << ", warpsPerCTA = [" << getWarpsPerCTA() << "]" + << ", order = [" << getOrder() << "]" + << "}>"; +} + +//===----------------------------------------------------------------------===// +// MMA encoding +//===----------------------------------------------------------------------===// + +Attribute MmaEncodingAttr::parse(AsmParser &parser, Type type) { + if (parser.parseLess().failed()) + return {}; + DictionaryAttr dict; + if (parser.parseAttribute(dict).failed()) + return {}; + if (parser.parseGreater().failed()) + return {}; + + unsigned versionMajor = 0; + unsigned versionMinor = 0; + SmallVector warpsPerCTA; + + for (const NamedAttribute &attr : dict) { + if (attr.getName() == "versionMajor") { + if (parseUInt(parser, attr, versionMajor, "versionMajor").failed()) + return {}; + } + if (attr.getName() == "versionMinor") { + if (parseUInt(parser, attr, versionMinor, "versionMinor").failed()) + return {}; + } + if (attr.getName() == "warpsPerCTA") { + if (parseIntArrayAttr(parser, attr, warpsPerCTA, "warpsPerCTA").failed()) + return {}; + } + } + + return parser.getChecked(parser.getContext(), versionMajor, + versionMinor, warpsPerCTA); +} + +void MmaEncodingAttr::print(AsmPrinter &printer) const { + printer << "<{" + << "versionMajor = " << getVersionMajor() << ", " + << "versionMinor = " << getVersionMinor() << ", " + << "warpsPerCTA = [" << getWarpsPerCTA() << "]" + << "}>"; +} + +//===----------------------------------------------------------------------===// +// Sliced Encoding +//===----------------------------------------------------------------------===// + +Attribute SliceEncodingAttr::parse(AsmParser &parser, Type type) { + if (parser.parseLess().failed()) + return {}; + NamedAttrList attrs; + if (parser.parseOptionalAttrDict(attrs).failed()) + return {}; + if (parser.parseGreater().failed()) + return {}; + unsigned dim = attrs.get("dim").cast().getInt(); + Attribute parent = attrs.get("parent"); + return parser.getChecked(parser.getContext(), dim, parent); +} + +void SliceEncodingAttr::print(mlir::AsmPrinter &printer) const { + printer << "<{" + << "dim = " << getDim() << ", " + << "parent = " << getParent() << "}>"; +} + +//===----------------------------------------------------------------------===// +// Shared encoding +//===----------------------------------------------------------------------===// + +Attribute SharedEncodingAttr::parse(AsmParser &parser, Type type) { + if (parser.parseLess().failed()) + return {}; + // Parse the data as a dictionary + DictionaryAttr dict; + if (parser.parseAttribute(dict).failed()) + return {}; + if (parser.parseGreater().failed()) + return {}; + + unsigned vec = 0; + unsigned perPhase = 0; + unsigned maxPhase = 0; + SmallVector order; + + for (const NamedAttribute &attr : dict) { + if (attr.getName() == "vec") { + if (parseUInt(parser, attr, vec, "vec").failed()) + return {}; + } else if (attr.getName() == "perPhase") { + if (parseUInt(parser, attr, perPhase, "perPhase").failed()) + return {}; + } else if (attr.getName() == "maxPhase") { + if (parseUInt(parser, attr, maxPhase, "maxPhase").failed()) + return {}; + } else if (attr.getName() == "order") { + if (parseIntArrayAttr(parser, attr, order, "order").failed()) + return {}; + } else { + parser.emitError(parser.getNameLoc(), "unexpected key: ") + << attr.getName().strref(); + return {}; + } + } + + return parser.getChecked(parser.getContext(), vec, + perPhase, 
maxPhase, order); +} + +void SharedEncodingAttr::print(AsmPrinter &printer) const { + printer << "<{" + << "vec = " << getVec() << ", perPhase = " << getPerPhase() + << ", maxPhase = " << getMaxPhase() << ", order = [" << getOrder() + << "]" + << "}>"; +} + +//===----------------------------------------------------------------------===// +// Mma encoding +//===----------------------------------------------------------------------===// + +bool MmaEncodingAttr::isVolta() const { return getVersionMajor() == 1; } + +bool MmaEncodingAttr::isAmpere() const { return getVersionMajor() == 2; } + +// Get [isARow, isBRow, isAVec4, isBVec4] from versionMinor +std::tuple +MmaEncodingAttr::decodeVoltaLayoutStates() const { + unsigned versionMinor = getVersionMinor(); + bool isARow = versionMinor & (1 << 0); + bool isBRow = versionMinor & (1 << 1); + bool isAVec4 = versionMinor & (1 << 2); + bool isBVec4 = versionMinor & (1 << 3); + return std::make_tuple(isARow, isBRow, isAVec4, isBVec4); +} + +//===----------------------------------------------------------------------===// +// DotOperand Encoding +//===----------------------------------------------------------------------===// +Attribute DotOperandEncodingAttr::parse(AsmParser &parser, Type type) { + if (parser.parseLess().failed()) + return {}; + NamedAttrList attrs; + if (parser.parseOptionalAttrDict(attrs).failed()) + return {}; + if (parser.parseGreater().failed()) + return {}; + unsigned opIdx = attrs.get("opIdx").cast().getInt(); + Attribute parent = attrs.get("parent"); + Attribute isMMAv1Row; + if (parent.isa() && + parent.cast().isVolta()) { + isMMAv1Row = attrs.get("isMMAv1Row"); + if (!isMMAv1Row) + llvm::report_fatal_error("isMMAv1Row attribute is missing"); + } + return parser.getChecked(parser.getContext(), opIdx, + parent, isMMAv1Row); +} + +void DotOperandEncodingAttr::print(mlir::AsmPrinter &printer) const { + printer << "<{" + << "opIdx = " << getOpIdx() << ", " + << "parent = " << getParent(); + if (getIsMMAv1Row()) + printer << ", isMMAv1Row = " << getIsMMAv1Row(); + printer << "}>"; +} + +//===----------------------------------------------------------------------===// +// InsertSliceAsyncOp +//===----------------------------------------------------------------------===// + +ParseResult parseInsertSliceAsyncOp(OpAsmParser &parser, + OperationState &result) { + SmallVector allOperands; + Type srcType, dstType; + SMLoc allOperandLoc = parser.getCurrentLocation(); + if (parser.parseOperandList(allOperands) || + parser.parseOptionalAttrDict(result.attributes) || parser.parseColon() || + parser.parseCustomTypeWithFallback(srcType) || parser.parseArrow() || + parser.parseCustomTypeWithFallback(dstType)) + return failure(); + result.addTypes(dstType); + + SmallVector operandTypes; + operandTypes.push_back(srcType); // src + operandTypes.push_back(dstType); // dst + operandTypes.push_back( + IntegerType::get(parser.getBuilder().getContext(), 32)); // index + + int hasMask = 0, hasOther = 0; + if (allOperands.size() >= 4) { + operandTypes.push_back(triton::getI1SameShape(srcType)); // mask + hasMask = 1; + } + if (allOperands.size() >= 5) { + operandTypes.push_back(triton::getPointeeType(srcType)); // other + hasOther = 1; + } + + if (parser.resolveOperands(allOperands, operandTypes, allOperandLoc, + result.operands)) + return failure(); + + // Deduce operand_segment_sizes from the number of the operands. 
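+  // src, dst and index are always present; mask and other are optional.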
+ auto operand_segment_sizesAttrName = + InsertSliceAsyncOp::operand_segment_sizesAttrName(result.name); + result.addAttribute( + operand_segment_sizesAttrName, + parser.getBuilder().getI32VectorAttr({1, 1, 1, hasMask, hasOther})); + return success(); +} + +void printInsertSliceAsyncOp(OpAsmPrinter &printer, + InsertSliceAsyncOp insertSliceAsyncOp) { + printer << " "; + printer << insertSliceAsyncOp.getOperation()->getOperands(); + // "operand_segment_sizes" can be deduced, so we don't print it. + printer.printOptionalAttrDict( + insertSliceAsyncOp->getAttrs(), + {insertSliceAsyncOp.operand_segment_sizesAttrName()}); + printer << " : "; + printer.printStrippedAttrOrType(insertSliceAsyncOp.src().getType()); + printer << " -> "; + printer.printStrippedAttrOrType(insertSliceAsyncOp.result().getType()); +} + +//===----------------------------------------------------------------------===// +// ASM Interface (i.e.: alias) +//===----------------------------------------------------------------------===// + +class TritonGPUOpAsmInterface : public OpAsmDialectInterface { +public: + using OpAsmDialectInterface::OpAsmDialectInterface; + + AliasResult getAlias(Attribute attr, raw_ostream &os) const override { + if (auto mmaAttr = attr.dyn_cast()) { + os << "mma"; + return AliasResult::FinalAlias; + } else if (auto sharedAttr = attr.dyn_cast()) { + os << "shared"; + return AliasResult::FinalAlias; + } else if (auto blockedAttr = attr.dyn_cast()) { + os << "blocked"; + return AliasResult::FinalAlias; + } /* else if (auto sliceAttr = attr.dyn_cast()) { + os << "slice"; + return AliasResult::FinalAlias; + } */ + return OpAsmDialectInterface::getAlias(attr, os); + } +}; + +struct TritonGPUInferLayoutInterface + : public triton::DialectInferLayoutInterface { + using DialectInferLayoutInterface::DialectInferLayoutInterface; + + LogicalResult + inferReduceOpEncoding(Attribute operandEncoding, unsigned axis, + Attribute &resultEncoding) const override { + resultEncoding = SliceEncodingAttr::get(getDialect()->getContext(), axis, + operandEncoding); + return success(); + } + + LogicalResult + inferExpandDimsOpEncoding(Attribute operandEncoding, unsigned axis, + Attribute &resultEncoding, + Optional location) const override { + auto sliceEncoding = operandEncoding.dyn_cast(); + if (!sliceEncoding) + return emitOptionalError( + location, "ExpandDimsOp operand encoding must be SliceEncodingAttr"); + if (sliceEncoding.getDim() != axis) + return emitOptionalError( + location, "Incompatible slice dimension for ExpandDimsOp operand"); + resultEncoding = sliceEncoding.getParent(); + return success(); + } + + LogicalResult inferDotOpEncoding(Attribute operandEncoding, unsigned opIdx, + Attribute retEncoding, + Optional location) const override { + if (auto dotOpEnc = operandEncoding.dyn_cast()) { + if (opIdx != dotOpEnc.getOpIdx()) + return emitOptionalError(location, "Wrong opIdx"); + if (retEncoding != dotOpEnc.getParent()) + return emitOptionalError(location, "Incompatible parent encoding"); + } else + return emitOptionalError( + location, "Dot's a/b's encoding should be of DotOperandEncodingAttr"); + return success(); + } +}; + +void TritonGPUDialect::initialize() { + addAttributes< +#define GET_ATTRDEF_LIST +#include "triton/Dialect/TritonGPU/IR/TritonGPUAttrDefs.cpp.inc" + >(); + addOperations< +#define GET_OP_LIST +#include "triton/Dialect/TritonGPU/IR/Ops.cpp.inc" + >(); + addInterfaces(); + addInterfaces(); +} + +#define GET_OP_CLASSES +#include "triton/Dialect/TritonGPU/IR/Ops.cpp.inc" + +// verify TritonGPU ops 
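+// Dialect attribute verification hook; it currently accepts everything.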
+LogicalResult TritonGPUDialect::verifyOperationAttribute(Operation *op, + NamedAttribute attr) { + // TODO: fill this. + return success(); +} diff --git a/lib/Dialect/TritonGPU/IR/Traits.cpp b/lib/Dialect/TritonGPU/IR/Traits.cpp new file mode 100644 index 000000000000..03253e12c1d7 --- /dev/null +++ b/lib/Dialect/TritonGPU/IR/Traits.cpp @@ -0,0 +1,14 @@ +#include "triton/Dialect/TritonGPU/IR/Traits.h" +#include "triton/Analysis/Utility.h" + +mlir::LogicalResult +mlir::OpTrait::impl::verifyResultsAreSharedEncoding(Operation *op) { + if (failed(verifyAtLeastNResults(op, 1))) + return failure(); + + for (auto result : op->getResults()) + if (!isSharedEncoding(result)) + return op->emitOpError() << "requires all results to be shared encoding"; + + return success(); +}; diff --git a/lib/Dialect/TritonGPU/Transforms/CMakeLists.txt b/lib/Dialect/TritonGPU/Transforms/CMakeLists.txt new file mode 100644 index 000000000000..aabcc1901161 --- /dev/null +++ b/lib/Dialect/TritonGPU/Transforms/CMakeLists.txt @@ -0,0 +1,21 @@ +set(LLVM_TARGET_DEFINITIONS Combine.td) +mlir_tablegen(TritonGPUCombine.inc -gen-rewriters) +add_public_tablegen_target(TritonGPUCombineIncGen) + +add_mlir_dialect_library(TritonGPUTransforms + Coalesce.cpp + CanonicalizeLoops.cpp + Combine.cpp + Pipeline.cpp + Prefetch.cpp + TritonGPUConversion.cpp + + DEPENDS + TritonGPUTransformsIncGen + TritonGPUCombineIncGen + + LINK_LIBS PUBLIC + TritonIR + TritonGPUIR + MLIRTransformUtils +) diff --git a/lib/Dialect/TritonGPU/Transforms/CanonicalizeLoops.cpp b/lib/Dialect/TritonGPU/Transforms/CanonicalizeLoops.cpp new file mode 100644 index 000000000000..462f393c4d90 --- /dev/null +++ b/lib/Dialect/TritonGPU/Transforms/CanonicalizeLoops.cpp @@ -0,0 +1,55 @@ +#include "mlir/Analysis/SliceAnalysis.h" +#include "triton/Dialect/TritonGPU/IR/Dialect.h" +#include "triton/Dialect/TritonGPU/Transforms/Passes.h" + +using namespace mlir; +using namespace mlir::triton; + +#define GEN_PASS_CLASSES +#include "triton/Dialect/TritonGPU/Transforms/Passes.h.inc" + +namespace { + +struct CanonicalizePass + : public TritonGPUCanonicalizeLoopsBase { + CanonicalizePass() = default; + + void runOnOperation() override { + + // Canonicalize pass may have created dead code that + // standard scf.for canonicalization cannot handle + // as of LLVM 14. For example, the iteration arguments + // for the pointer of the synchronous loads that are + // discarded. + // The following piece of code is a workaround to + // very crudely remove dead code, by making an iteration + // argument yield itself if it is not used to create + // side effects anywhere. 
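+    // For each scf.for result, check the two conditions below; when both hold,
+    // rewire the corresponding yield operand to the region iter arg so that
+    // canonicalization can drop the dead value.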
+ getOperation()->walk([&](scf::ForOp forOp) -> void { + for (size_t i = 0; i < forOp.getNumResults(); ++i) { + // condition 1: no other iter arguments depend on it + SetVector fwdSlice; + mlir::getForwardSlice(forOp.getRegionIterArgs()[i], &fwdSlice); + Operation *yieldOp = forOp.getBody()->getTerminator(); + bool noOtherDependency = std::all_of( + yieldOp->operand_begin(), yieldOp->operand_end(), [&](Value arg) { + return arg == yieldOp->getOperand(i) || + !fwdSlice.contains(arg.getDefiningOp()); + }); + // condition 2: final value is not used after the loop + auto retVal = forOp.getResult(i); + bool noUserAfterLoop = retVal.getUsers().empty(); + // yielding the region iter arg will cause loop canonicalization + // to clean up the dead code + if (noOtherDependency && noUserAfterLoop) { + yieldOp->setOperand(i, forOp.getRegionIterArgs()[i]); + } + } + }); + } +}; +} // anonymous namespace + +std::unique_ptr mlir::createTritonGPUCanonicalizeLoopsPass() { + return std::make_unique(); +} \ No newline at end of file diff --git a/lib/Dialect/TritonGPU/Transforms/Coalesce.cpp b/lib/Dialect/TritonGPU/Transforms/Coalesce.cpp new file mode 100644 index 000000000000..d2cb9c964c70 --- /dev/null +++ b/lib/Dialect/TritonGPU/Transforms/Coalesce.cpp @@ -0,0 +1,139 @@ +#include "triton/Analysis/AxisInfo.h" +#include "triton/Analysis/Utility.h" +#include "triton/Dialect/TritonGPU/IR/Dialect.h" +#include "triton/Dialect/TritonGPU/Transforms/Passes.h" +#include + +using namespace mlir; +using namespace mlir::triton; + +#define GEN_PASS_CLASSES +#include "triton/Dialect/TritonGPU/Transforms/Passes.h.inc" + +struct CoalescePass : public TritonGPUCoalesceBase { + Attribute getCoalescedEncoding(AxisInfoAnalysis &axisInfo, Value ptr, + int numWarps) { + auto origType = ptr.getType().cast(); + // Get the shape of the tensor. + size_t rank = origType.getRank(); + AxisInfo info = axisInfo.lookupLatticeElement(ptr)->getValue(); + // Layout order in decreasing order of contiguity + SmallVector order(rank); + std::iota(order.begin(), order.end(), 0); + auto contiguity = info.getContiguity(); + std::sort(order.begin(), order.end(), [&](unsigned x, unsigned y) { + return contiguity[x] > contiguity[y]; + }); + + int numElems = product(origType.getShape()); + int numThreads = numWarps * 32; + int numElemsPerThread = std::max(numElems / numThreads, 1); + + // Thread tile size depends on memory alignment + SmallVector sizePerThread(rank, 1); + PointerType ptrType = origType.getElementType().cast(); + auto pointeeType = ptrType.getPointeeType(); + unsigned numBits = pointeeType.isa() + ? 
8 + : pointeeType.getIntOrFloatBitWidth(); + unsigned maxMultiple = info.getDivisibility(order[0]); + unsigned maxContig = info.getContiguity(order[0]); + unsigned alignment = std::min(maxMultiple, maxContig); + unsigned perThread = std::min(alignment, 128 / numBits); + sizePerThread[order[0]] = std::min(perThread, numElemsPerThread); + + SmallVector dims(rank); + std::iota(dims.begin(), dims.end(), 0); + // create encoding + Attribute encoding = triton::gpu::BlockedEncodingAttr::get( + &getContext(), origType.getShape(), sizePerThread, order, numWarps); + return encoding; + } + + std::function getTypeConverter(AxisInfoAnalysis &axisInfo, + Value ptr, int numWarps) { + Attribute encoding = getCoalescedEncoding(axisInfo, ptr, numWarps); + return [encoding](Type _type) { + RankedTensorType type = _type.cast(); + return RankedTensorType::get(type.getShape(), type.getElementType(), + encoding); + }; + } + + template + void coalesceOp(AxisInfoAnalysis &axisInfo, Operation *op, Value ptr, + OpBuilder builder) { + RankedTensorType ty = ptr.getType().template dyn_cast(); + if (!ty) + return; + auto mod = op->getParentOfType(); + int numWarps = triton::gpu::TritonGPUDialect::getNumWarps(mod); + + AxisInfo info = axisInfo.lookupLatticeElement(ptr)->getValue(); + auto convertType = getTypeConverter(axisInfo, ptr, numWarps); + // convert operands + SmallVector newArgs; + for (auto v : op->getOperands()) { + auto vTy = v.getType().dyn_cast(); + if (vTy && !vTy.getEncoding().isa()) + newArgs.push_back(builder.create( + op->getLoc(), convertType(v.getType()), v)); + else + newArgs.push_back(v); + } + // convert output types + SmallVector newTypes; + for (auto t : op->getResultTypes()) { + bool is_async = std::is_same::value; + newTypes.push_back(is_async ? t : convertType(t)); + } + // construct new op with the new encoding + Operation *newOp = + builder.create(op->getLoc(), newTypes, newArgs, op->getAttrs()); + // cast the results back to the original layout + for (size_t i = 0; i < op->getNumResults(); i++) { + Value newResult = newOp->getResult(i); + if (newTypes[i] != op->getResultTypes()[i]) { + newResult = builder.create( + op->getLoc(), op->getResult(i).getType(), newResult); + } + op->getResult(i).replaceAllUsesWith(newResult); + } + op->erase(); + } + + void runOnOperation() override { + Operation *op = getOperation(); + // Run axis info analysis + AxisInfoAnalysis axisInfo(&getContext()); + axisInfo.run(op); + OpBuilder builder(op); + + // For each memory op that has a layout L1: + // 1. Create a coalesced memory layout L2 of the pointer operands + // 2. Convert all operands from layout L1 to layout L2 + // 3. Create a new memory op that consumes these operands and + // produces a tensor with layout L2 + // 4. Convert the output of this new memory op back to L1 + // 5. 
Replace all the uses of the original memory op by the new one + op->walk([&](Operation *curr) { + OpBuilder::InsertionGuard g(builder); + builder.setInsertionPoint(curr); + if (auto load = dyn_cast(curr)) + coalesceOp(axisInfo, curr, load.ptr(), builder); + if (auto op = dyn_cast(curr)) + coalesceOp(axisInfo, curr, op.ptr(), builder); + if (auto op = dyn_cast(curr)) + coalesceOp(axisInfo, curr, op.ptr(), builder); + if (auto load = dyn_cast(curr)) + coalesceOp(axisInfo, curr, load.src(), + builder); + if (auto store = dyn_cast(curr)) + coalesceOp(axisInfo, curr, store.ptr(), builder); + }); + } +}; + +std::unique_ptr mlir::createTritonGPUCoalescePass() { + return std::make_unique(); +} diff --git a/lib/Dialect/TritonGPU/Transforms/Combine.cpp b/lib/Dialect/TritonGPU/Transforms/Combine.cpp new file mode 100644 index 000000000000..7dcdc0162b10 --- /dev/null +++ b/lib/Dialect/TritonGPU/Transforms/Combine.cpp @@ -0,0 +1,1243 @@ +#include "mlir/Analysis/SliceAnalysis.h" +#include "mlir/Dialect/SCF/SCF.h" +#include "mlir/IR/BlockAndValueMapping.h" +#include "mlir/IR/BuiltinAttributes.h" +#include "mlir/IR/Matchers.h" +#include "mlir/IR/PatternMatch.h" +#include "mlir/IR/Verifier.h" +#include "mlir/Interfaces/InferTypeOpInterface.h" +#include "mlir/Pass/Pass.h" +#include "mlir/Pass/PassManager.h" +#include "mlir/Support/LogicalResult.h" +#include "mlir/Transforms/GreedyPatternRewriteDriver.h" +#include "mlir/Transforms/Passes.h" +#include "mlir/Transforms/RegionUtils.h" +#include "triton/Analysis/Utility.h" +#include "triton/Dialect/TritonGPU/IR/Dialect.h" +#include "triton/Dialect/TritonGPU/Transforms/Passes.h" +#include "triton/Dialect/TritonGPU/Transforms/TritonGPUConversion.h" + +#include + +using namespace mlir; +namespace { +#include "TritonGPUCombine.inc" + +// ----------------------------------------------------------------------------- +// +// ----------------------------------------------------------------------------- + +// convert(blocked, dot_operand) -> +// convert(blocked, mma) + convert(mma, dot_operand) +// if this value is itself the result of a dot operation +// this is a heuristic to accommodate some pattern seen in fused attention +// kernels. +// TODO: replace this by something more generic, i.e. 
layout-aware CSE +class DecomposeDotOperand : public mlir::RewritePattern { + +public: + explicit DecomposeDotOperand(mlir::MLIRContext *context) + : mlir::RewritePattern(triton::gpu::ConvertLayoutOp::getOperationName(), + 1, context) {} + + mlir::LogicalResult + matchAndRewrite(mlir::Operation *op, + mlir::PatternRewriter &rewriter) const override { + if (!llvm::isa(op)) + return mlir::failure(); + auto convert = llvm::cast(op); + auto srcType = convert.getOperand().getType().cast(); + auto dstType = convert.getType().cast(); + if (srcType.getEncoding().isa() && + dstType.getEncoding().isa()) { + auto dstDotOperand = + dstType.getEncoding().cast(); + auto dstParent = dstDotOperand.getParent(); + if (dstDotOperand.getOpIdx() == 1 || + !dstParent.isa()) + return mlir::failure(); + auto dstParentMma = dstParent.cast(); + if (dstParentMma.isVolta() || dstParentMma.getWarpsPerCTA()[1] > 1) + return mlir::failure(); + SetVector bwdSlices; + mlir::getBackwardSlice(convert.getResult(), &bwdSlices); + if (llvm::find_if(bwdSlices, [](Operation *op) { + return isa(op); + }) == bwdSlices.end()) + return mlir::failure(); + + auto tmpType = RankedTensorType::get( + dstType.getShape(), dstType.getElementType(), dstParentMma); + auto tmp = rewriter.create( + convert.getLoc(), tmpType, convert.getOperand()); + auto newConvert = rewriter.create( + convert.getLoc(), dstType, tmp); + rewriter.replaceOp(op, {newConvert}); + return mlir::success(); + } + return mlir::failure(); + } +}; + +class SimplifyReduceCvt : public mlir::RewritePattern { +public: + explicit SimplifyReduceCvt(mlir::MLIRContext *context) + : mlir::RewritePattern(triton::ReduceOp::getOperationName(), 2, context) { + } + + mlir::LogicalResult + matchAndRewrite(mlir::Operation *op, + mlir::PatternRewriter &rewriter) const override { + auto reduce = cast(*op); + auto reduceArg = dyn_cast( + reduce.getOperand().getDefiningOp()); + if (!reduceArg) + return mlir::failure(); + // this may generate unsupported conversions in the LLVM codegen + if (reduceArg.getOperand() + .getType() + .cast() + .getEncoding() + .isa()) + return mlir::failure(); + auto newReduce = rewriter.create( + op->getLoc(), reduce.redOp(), reduceArg.getOperand(), reduce.axis()); + if (isa( + *reduceArg.getOperand().getDefiningOp())) + return mlir::failure(); + Value newRet = newReduce.getResult(); + // it's still beneficial to move the conversion + // to after the reduce if necessary since it will be + // done on a rank-reduced tensor hence cheaper + if (newRet.getType() != reduce.getResult().getType()) + newRet = rewriter.create( + op->getLoc(), reduce.getResult().getType(), newRet); + rewriter.replaceOp(op, newRet); + + return success(); + } +}; + +// Layout conversions can't deduce their return type automatically. 
+// IIUC they are therefore not handled by DRR right now +class SimplifyConversion : public mlir::RewritePattern { +public: + explicit SimplifyConversion(mlir::MLIRContext *context) + : mlir::RewritePattern(triton::gpu::ConvertLayoutOp::getOperationName(), + 4, context) {} + + mlir::LogicalResult + matchAndRewrite(mlir::Operation *op, + mlir::PatternRewriter &rewriter) const override { + if (!llvm::isa(op)) + return mlir::failure(); + auto convert = llvm::cast(op); + // we don't handle conversions to DotOperandEncodingAttr + // this is a heuristics to accommodate fused attention + auto srcType = convert.getOperand().getType().cast(); + auto dstType = convert.getType().cast(); + if (dstType.getEncoding().isa() && + srcType.getEncoding().isa()) + return mlir::failure(); + // convert to the same layout -- we can delete + if (op->getResultTypes() == op->getOperandTypes()) { + rewriter.replaceOp(op, op->getOperands()); + return mlir::success(); + } + Operation *arg = op->getOperand(0).getDefiningOp(); + // block argument + if (!arg) + return mlir::failure(); + // cvt(alloc_tensor(x), type2) -> alloc_tensor(x, type2) + auto alloc_tensor = dyn_cast(arg); + if (alloc_tensor) { + if (!isSharedEncoding(op->getResult(0))) { + return mlir::failure(); + } + rewriter.replaceOpWithNewOp( + op, op->getResult(0).getType()); + return mlir::success(); + } + // cvt(insert_slice(x), type2) -> insert_slice(cvt(x, type2)) + auto insert_slice = dyn_cast(arg); + if (insert_slice) { + if (!isSharedEncoding(op->getResult(0))) { + return mlir::failure(); + } + auto newType = op->getResult(0).getType().cast(); + // Ensure that the new insert_slice op is placed in the same place as the + // old insert_slice op. Otherwise, the new insert_slice op may be placed + // after the async_wait op, which is not allowed. + OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPoint(insert_slice); + auto newArg = rewriter.create( + op->getLoc(), newType, insert_slice.dst()); + rewriter.replaceOpWithNewOp( + op, newType, insert_slice.src(), newArg.getResult(), + insert_slice.index(), insert_slice.mask(), insert_slice.other(), + insert_slice.cache(), insert_slice.evict(), insert_slice.isVolatile(), + insert_slice.axis()); + return mlir::success(); + } + // cvt(extract_slice(x), type2) -> extract_slice(cvt(x, type2)) + auto extract_slice = dyn_cast(arg); + if (extract_slice) { + if (!isSharedEncoding(op->getResult(0))) { + return mlir::failure(); + } + auto origType = extract_slice.source().getType().cast(); + auto newType = RankedTensorType::get( + origType.getShape(), origType.getElementType(), + op->getResult(0).getType().cast().getEncoding()); + auto origResType = op->getResult(0).getType().cast(); + auto resType = RankedTensorType::get( + origResType.getShape(), origResType.getElementType(), + extract_slice.getType().cast().getEncoding()); + // Ensure that the new extract_slice op is placed in the same place as the + // old extract_slice op. Otherwise, the new extract_slice op may be placed + // after the async_wait op, which is not allowed. 
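+      // The guard restores the rewriter's insertion point when this scope ends.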
+ OpBuilder::InsertionGuard guard(rewriter); + rewriter.setInsertionPoint(extract_slice); + auto newArg = rewriter.create( + op->getLoc(), newType, extract_slice.source()); + rewriter.replaceOpWithNewOp( + op, resType, newArg.getResult(), extract_slice.offsets(), + extract_slice.sizes(), extract_slice.strides(), + extract_slice.static_offsets(), extract_slice.static_sizes(), + extract_slice.static_strides()); + return mlir::success(); + } + + // cvt(cvt(x, type1), type2) -> cvt(x, type2) + if (llvm::isa(arg)) { + if (arg->getOperand(0).getDefiningOp() && + !isSharedEncoding(arg->getOperand(0)) && + isSharedEncoding(convert.getOperand()) && + !isSharedEncoding(convert.getResult())) { + return mlir::failure(); + } + if (isSharedEncoding(convert.getOperand()) && + isSharedEncoding(convert.getResult())) { + return mlir::failure(); + } + auto srcType = convert.getOperand().getType().cast(); + auto srcShared = + srcType.getEncoding().dyn_cast(); + if (srcShared && srcShared.getVec() > 1) + return mlir::failure(); + rewriter.replaceOpWithNewOp( + op, op->getResultTypes().front(), arg->getOperand(0)); + return mlir::success(); + } + // cvt(type1, splat(type2, x)) -> splat(type1, x) + if (auto splat = llvm::dyn_cast(arg)) { + rewriter.replaceOpWithNewOp(op, op->getResultTypes(), + splat.src()); + return mlir::success(); + } + // cvt(type1, make_range(type2, x)) -> make_range(type1, x) + if (auto range = llvm::dyn_cast(arg)) { + rewriter.replaceOpWithNewOp( + op, op->getResultTypes(), range.start(), range.end()); + return mlir::success(); + } + // cvt(type, constant) -> constant + if (auto cst = llvm::dyn_cast(arg)) + if (auto ret = cst.getValue().dyn_cast()) { + auto newRet = SplatElementsAttr::get(op->getResultTypes().front(), + ret.getSplatValue()); + rewriter.replaceOpWithNewOp(op, newRet); + return mlir::success(); + } + return mlir::failure(); + } +}; + +// ----------------------------------------------------------------------------- +// +// ----------------------------------------------------------------------------- + +// TODO: Interface +LogicalResult invertEncoding(Attribute targetEncoding, Operation *op, + Attribute &ret) { + ret = targetEncoding; + if (auto expand_dims = dyn_cast(op)) { + ret = triton::gpu::SliceEncodingAttr::get( + op->getContext(), expand_dims.axis(), targetEncoding); + } + if (auto reduce = dyn_cast(op)) { + auto sliceEncoding = + targetEncoding.dyn_cast(); + if (!sliceEncoding) + return failure(); + ret = sliceEncoding.getParent(); + } + return success(); +} + +// TODO: Interface +LogicalResult getForwardEncoding(Attribute sourceEncoding, Operation *op, + Attribute &ret) { + if (op->hasTrait()) { + ret = sourceEncoding; + return success(); + } + if (isa(op)) { + ret = Attribute(); + return success(); + } + return failure(); +} + +inline bool expensive_to_remat(Operation *op) { + if (!op) + return true; + if (isa(op)) + return true; + if (isa(op)) + return true; + return false; +} + +LogicalResult simulateBackwardRematerialization( + Operation *initOp, SetVector &processed, + SetVector &layout, llvm::MapVector &toConvert, + Attribute targetEncoding) { + // DFS + std::vector> queue; + queue.emplace_back(initOp, targetEncoding); + // We want to see the effect of converting `initOp` to a new layout + // so we initialize `numCvts = 1`. 
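+  // Accounting sketch (illustrative): each operation we visit is assumed to
+  // absorb one pending conversion (numCvts -= 1), while every operand of it
+  // that is produced by another not-yet-processed op in the same block adds
+  // one pending conversion (numCvts += 1). Rematerialization is only reported
+  // as profitable when the final balance is <= 0, i.e. when the rewrite
+  // cannot end up introducing more layout conversions than it removes.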
+ int numCvts = 1; + while (!queue.empty()) { + Operation *currOp; + Attribute currLayout; + std::tie(currOp, currLayout) = queue.back(); + queue.pop_back(); + // If the current operation is expensive to rematerialize, + // we stop everything + if (expensive_to_remat(currOp)) + return mlir::failure(); + // we would propagate the conversion here + numCvts -= 1; + // check if the conversion could be folded at this operation + if (isa(*currOp)) + continue; + // done processing + processed.insert(currOp); + layout.insert(currLayout); + // add all operands to the queue + for (Value argI : currOp->getOperands()) { + Attribute newEncoding; + // cannot invert the current encoding for this operand + // we stop everything + if (failed(invertEncoding(currLayout, currOp, newEncoding))) { + return mlir::failure(); + } + if (toConvert.count(argI) && toConvert[argI] != newEncoding) + return mlir::failure(); + // + Operation *opArgI = argI.getDefiningOp(); + toConvert.insert({argI, newEncoding}); + if (!opArgI || processed.contains(opArgI) || + (opArgI->getBlock() != initOp->getBlock())) + continue; + // we add one expensive conversion for the current operand + numCvts += 1; + queue.emplace_back(opArgI, newEncoding); + } + } + // if rematerialization would add more conversions than it removes + // then we don't do it + if (numCvts > 0) + return mlir::failure(); + return mlir::success(); +} + +// + +Operation *cloneWithInferType(mlir::PatternRewriter &rewriter, Operation *op, + BlockAndValueMapping &mapping) { + Operation *newOp = rewriter.clone(*op, mapping); + auto origType = op->getResult(0).getType().cast(); + auto newType = RankedTensorType::get( + origType.getShape(), origType.getElementType(), + newOp->getOperand(0).getType().cast().getEncoding()); + newOp->getResult(0).setType(newType); + auto typeInfer = dyn_cast(newOp); + if (typeInfer) { + SmallVector newType; + auto success = typeInfer.inferReturnTypes( + newOp->getContext(), newOp->getLoc(), newOp->getOperands(), + newOp->getAttrDictionary(), newOp->getRegions(), newType); + if (succeeded(success)) + newOp->getResult(0).setType(newType.front()); + } + return newOp; +} + +// +class MoveConvertOutOfIf : public mlir::RewritePattern { +public: + explicit MoveConvertOutOfIf(mlir::MLIRContext *context) + : mlir::RewritePattern(scf::IfOp::getOperationName(), 2, context) {} + + mlir::LogicalResult + matchAndRewrite(mlir::Operation *op, + mlir::PatternRewriter &rewriter) const override { + auto ifOp = cast(*op); + auto thenYield = ifOp.thenYield(); + auto elseYield = ifOp.elseYield(); + int numOps = thenYield.getNumOperands(); + SmallVector newThenYieldOps = thenYield.getOperands(); + SmallVector newElseYieldOps = elseYield.getOperands(); + SetVector thenCvts; + SetVector elseCvts; + SmallVector newRetTypes; + + BlockAndValueMapping mapping; + for (size_t i = 0; i < numOps; i++) { + auto thenCvt = dyn_cast( + thenYield.getOperand(i).getDefiningOp()); + auto elseCvt = dyn_cast( + elseYield.getOperand(i).getDefiningOp()); + if (thenCvt && elseCvt && + std::distance(thenCvt->user_begin(), thenCvt->user_end()) == 1 && + std::distance(elseCvt->user_begin(), elseCvt->user_end()) == 1 && + thenCvt.getOperand().getType() == elseCvt.getOperand().getType()) { + mapping.map(thenCvt.getResult(), thenCvt.getOperand()); + mapping.map(elseCvt.getResult(), elseCvt.getOperand()); + newRetTypes.push_back(thenCvt.getOperand().getType()); + thenCvts.insert((Operation *)thenCvt); + elseCvts.insert((Operation *)elseCvt); + } else + 
newRetTypes.push_back(thenYield.getOperand(i).getType()); + } + if (mapping.getValueMap().empty()) + return mlir::failure(); + + rewriter.setInsertionPoint(op); + auto newIfOp = rewriter.create(ifOp.getLoc(), newRetTypes, + ifOp.getCondition(), true); + // rematerialize `then` block + rewriter.setInsertionPointToEnd(newIfOp.thenBlock()); + for (Operation &op : ifOp.thenBlock()->getOperations()) { + if (thenCvts.contains(&op)) { + mapping.map(op.getResult(0), mapping.lookup(op.getOperand(0))); + continue; + } + rewriter.clone(op, mapping); + } + // rematerialize `else` block + rewriter.setInsertionPointToEnd(newIfOp.elseBlock()); + for (Operation &op : ifOp.elseBlock()->getOperations()) { + if (elseCvts.contains(&op)) { + mapping.map(op.getResult(0), mapping.lookup(op.getOperand(0))); + continue; + } + rewriter.clone(op, mapping); + } + + rewriter.setInsertionPointAfter(newIfOp); + SmallVector newRetValues = newIfOp.getResults(); + for (size_t i = 0; i < numOps; i++) { + if (newIfOp.getResult(i).getType() != ifOp.getResult(i).getType()) { + newRetValues[i] = rewriter.create( + newIfOp.getLoc(), ifOp.getResult(i).getType(), + newIfOp.getResult(i)); + } + } + + rewriter.replaceOp(op, newRetValues); + return mlir::success(); + } +}; + +// +class FoldConvertAndReduce : public mlir::RewritePattern { +public: + explicit FoldConvertAndReduce(mlir::MLIRContext *context) + : mlir::RewritePattern(triton::gpu::ConvertLayoutOp::getOperationName(), + 1, context) {} + + mlir::LogicalResult + matchAndRewrite(mlir::Operation *cvtOp, + mlir::PatternRewriter &rewriter) const override { + auto cvt = dyn_cast(*cvtOp); + auto srcEncoding = + cvt.getOperand().getType().cast().getEncoding(); + auto dstEncoding = + cvt.getResult().getType().cast().getEncoding(); + if (srcEncoding.isa()) + return failure(); + SetVector cvtSlices; + auto filter = [&](Operation *op) { + return op->getBlock() == cvt->getBlock() && + !(isa(op) && + !op->getResult(0).getType().isa()) && + !isa(op); + }; + mlir::getForwardSlice(cvt.getResult(), &cvtSlices, filter); + if (cvtSlices.empty()) + return failure(); + + llvm::MapVector toConvert; + for (Operation *op : cvtSlices) { + // don't rematerialize anything expensive + if (expensive_to_remat(op)) + return failure(); + // don't rematerialize non-element-wise + if (!op->hasTrait()) + return failure(); + Attribute dstEncoding = + cvt.getOperand().getType().cast().getEncoding(); + // don't rematerialize if it adds an extra conversion that can't + // be removed + for (Value arg : op->getOperands()) { + Operation *argOp = arg.getDefiningOp(); + SetVector processed; + SetVector layout; + llvm::MapVector toConvert; + if (argOp && (argOp != cvt) && cvtSlices.count(argOp) == 0 && + failed(simulateBackwardRematerialization(argOp, processed, layout, + toConvert, dstEncoding))) { + return failure(); + } + } + } + + BlockAndValueMapping mapping; + auto op = cvtSlices.front(); + for (Value arg : op->getOperands()) { + if (arg.getDefiningOp() == cvt) + mapping.map(arg, cvt.getOperand()); + else { + auto cvtI = rewriter.create( + arg.getLoc(), cvt.getOperand().getType(), arg); + if (Operation *argOp = arg.getDefiningOp()) + cvtI->moveAfter(argOp); + mapping.map(arg, cvtI); + } + } + rewriter.setInsertionPoint(op); + Operation *newOp = rewriter.clone(*op, mapping); + auto oldType = op->getResult(0).getType().cast(); + auto newType = RankedTensorType::get( + oldType.getShape(), oldType.getElementType(), + cvt.getOperand().getType().cast().getEncoding()); + + newOp->getResult(0).setType(newType); + auto 
newCvtType = RankedTensorType::get( + oldType.getShape(), oldType.getElementType(), + cvt.getResult().getType().cast().getEncoding()); + auto newCvt = rewriter.create( + newOp->getLoc(), newCvtType, newOp->getResult(0)); + rewriter.replaceOp(op, newCvt->getResults()); + return success(); + } +}; + +// Layout conversions are expensive. They require going through +// shared memory, which is orders of magnitude slower than +// other non-i/o operations in the dialect. +// It therefore makes sense to remove them whenever possible, +// even if it means rematerializing all values whose definitions +// are reachable from it without passing through any memory operation. +class RematerializeBackward : public mlir::RewritePattern { +public: + explicit RematerializeBackward(mlir::MLIRContext *context) + : mlir::RewritePattern(triton::gpu::ConvertLayoutOp::getOperationName(), + 2, context) {} + + mlir::LogicalResult + matchAndRewrite(mlir::Operation *cvt, + mlir::PatternRewriter &rewriter) const override { + if (!llvm::isa(cvt)) + return mlir::failure(); + // we don't touch block arguments + Operation *op = cvt->getOperand(0).getDefiningOp(); + if (!op) + return mlir::failure(); + // we don't want to rematerialize any conversion to/from shared + if (isSharedEncoding(cvt->getResults()[0]) || + isSharedEncoding(cvt->getOperand(0))) + return mlir::failure(); + // we don't handle conversions to DotOperandEncodingAttr + // this is a heuristics to accommodate fused attention + auto targetType = cvt->getResultTypes()[0].cast(); + if (targetType.getEncoding().isa()) + return mlir::failure(); + // DFS + SetVector processed; + SetVector layout; + llvm::MapVector toConvert; + std::vector> queue; + queue.emplace_back(cvt, targetType.getEncoding()); + int numCvts = 1; + while (!queue.empty()) { + Operation *currOp; + Attribute currLayout; + std::tie(currOp, currLayout) = queue.back(); + queue.pop_back(); + // If the current operation is expensive to rematerialize, + // we stop everything + if (expensive_to_remat(currOp)) + break; + // a conversion will be removed here (i.e. 
transferred to operands) + numCvts -= 1; + // done processing + processed.insert(currOp); + layout.insert(currLayout); + // add all operands to the queue + for (Value argI : currOp->getOperands()) { + Attribute newEncoding; + // cannot invert the current encoding for this operand + // we stop everything + if (failed(invertEncoding(currLayout, currOp, newEncoding))) + return mlir::failure(); + if (toConvert.count(argI) && toConvert[argI] != newEncoding) + return mlir::failure(); + // + Operation *opArgI = argI.getDefiningOp(); + toConvert.insert({argI, newEncoding}); + if (!opArgI || processed.contains(opArgI) || + (opArgI->getBlock() != cvt->getBlock())) + continue; + // if the conversion can be folded into opArgI then + // we don't count this conversion as expensive + if (isa(*opArgI)) + continue; + // we add one expensive conversion for the current operand + numCvts += 1; + queue.emplace_back(opArgI, newEncoding); + } + } + // if rematerialization would add more conversions than it removes + // then we don't do it + if (numCvts > 0) + return mlir::failure(); + + SmallVector sortedValues; + SetVector tmp; + for (auto &item : toConvert) { + Value v = item.first; + if (v.getDefiningOp()) + tmp.insert(v.getDefiningOp()); + else + sortedValues.push_back(v); + } + tmp = mlir::topologicalSort(tmp); + for (Operation *op : tmp) + sortedValues.push_back(op->getResult(0)); + + BlockAndValueMapping mapping; + for (Value currOperand : sortedValues) { + // unpack information + Attribute targetLayout = toConvert.lookup(currOperand); + // rematerialize the operand if necessary + Operation *currOperation = currOperand.getDefiningOp(); + if (processed.contains(currOperation)) { + currOperation = cloneWithInferType(rewriter, currOperation, mapping); + currOperand = currOperation->getResult(0); + } + // compute target type for the layout cast + auto currType = currOperand.getType().cast(); + auto newType = RankedTensorType::get( + currType.getShape(), currType.getElementType(), targetLayout); + auto newOperand = rewriter.create( + currOperand.getLoc(), newType, currOperand); + if (currOperation) + newOperand->moveAfter(currOperation); + mapping.map(currOperand, newOperand); + } + rewriter.replaceOp(cvt, mapping.lookup(cvt->getOperand(0))); + return mlir::success(); + } +}; + +// ----------------------------------------------------------------------------- +// +// ----------------------------------------------------------------------------- + +class MoveConvertOutOfLoop : public mlir::RewritePattern { +public: + explicit MoveConvertOutOfLoop(mlir::MLIRContext *context) + : mlir::RewritePattern(scf::ForOp::getOperationName(), 1, context) {} + + SmallVector + rematerializeForLoop(mlir::PatternRewriter &rewriter, scf::ForOp &forOp, + size_t i, RankedTensorType newType, + triton::gpu::ConvertLayoutOp origConversion) const { + // Rewrite init argument + Type origType = forOp.getInitArgs()[i].getType(); + SmallVector newInitArgs = forOp.getInitArgs(); + newInitArgs[i] = rewriter.create( + newInitArgs[i].getLoc(), newType, newInitArgs[i]); + // Clone for loop + auto newForOp = rewriter.create( + forOp.getLoc(), forOp.getLowerBound(), forOp.getUpperBound(), + forOp.getStep(), newInitArgs); + newForOp->moveBefore(forOp); + rewriter.setInsertionPointToStart(newForOp.getBody()); + BlockAndValueMapping mapping; + for (const auto &arg : llvm::enumerate(forOp.getRegionIterArgs())) + mapping.map(arg.value(), newForOp.getRegionIterArgs()[arg.index()]); + mapping.map(origConversion.getResult(), 
newForOp.getRegionIterArgs()[i]); + // the iter arg of interest may have other uses than the conversion + // we're hoisting out of the loop. If that's the case we will + // need to add extra conversions for all uses... which is only useful + // if these extra conversions can be removed by another pattern + auto oldArg = forOp.getRegionIterArgs()[i]; + auto newArg = newForOp.getRegionIterArgs()[i]; + auto newArgFallback = rewriter.create( + newForOp.getLoc(), origType, newArg); + + mapping.map(forOp.getInductionVar(), newForOp.getInductionVar()); + for (Operation &op : forOp.getBody()->without_terminator()) { + if (&op == (Operation *)(&origConversion)) + continue; + Operation *newOp = rewriter.clone(op, mapping); + if (find(oldArg.getUsers(), &op) != oldArg.getUsers().end()) + newOp->replaceUsesOfWith(newArg, newArgFallback); + } + + // create yield, inserting conversions if necessary + auto yieldOp = forOp.getBody()->getTerminator(); + SmallVector newYieldArgs; + for (Value arg : yieldOp->getOperands()) + newYieldArgs.push_back(mapping.lookup(arg)); + newYieldArgs[i] = rewriter.create( + yieldOp->getLoc(), newType, newYieldArgs[i]); + rewriter.create(forOp.getLoc(), newYieldArgs); + + // replace + SmallVector newResults = newForOp->getResults(); + newResults[i] = rewriter.create( + rewriter.getUnknownLoc(), origType, newForOp->getResult(i)); + newResults[i].getDefiningOp()->moveAfter(newForOp); + return newResults; + } + + mlir::LogicalResult + matchAndRewrite(mlir::Operation *op, + mlir::PatternRewriter &rewriter) const override { + auto forOp = cast(op); + auto iterArgs = forOp.getRegionIterArgs(); + for (const auto &iterArg : llvm::enumerate(iterArgs)) { + // if (iterArg.index() != 1) + // continue; + // skip non-tensor types + if (!iterArg.value().getType().isa()) + continue; + // we only move `iterArg` out of the loop if + // - there is only a single conversion use + // - moving this conversion out of the loop will not generate + // any extra non-removable conversion + auto users = iterArg.value().getUsers(); + // check first condition + SetVector cvtTargetTypes; + for (auto user : users) { + if (isa(user)) { + auto newType = + user->getResults()[0].getType().cast(); + auto oldType = user->getOperand(0).getType().cast(); + if (oldType.getEncoding().isa() && + newType.getEncoding() + .isa()) { + continue; + } + if (newType.getEncoding().isa()) { + if (newType.getEncoding() + .cast() + .getVec() == 1) + continue; + } + cvtTargetTypes.insert(newType); + } + } + if (cvtTargetTypes.size() != 1) + continue; + // TODO: check second condition + for (auto user : users) { + if (isa(user)) + continue; + } + // check + for (auto op : iterArg.value().getUsers()) { + auto cvt = dyn_cast(op); + if (!cvt) + continue; + auto targetType = op->getResultTypes()[0].cast(); + auto newFor = rematerializeForLoop(rewriter, forOp, iterArg.index(), + targetType, cvt); + rewriter.replaceOp(forOp, newFor); + return success(); + } + } + return failure(); + } +}; + +// ----------------------------------------------------------------------------- +// +// ----------------------------------------------------------------------------- + +class RematerializeForward : public mlir::RewritePattern { +public: + explicit RematerializeForward(mlir::MLIRContext *context) + : mlir::RewritePattern(triton::gpu::ConvertLayoutOp::getOperationName(), + 2, context) {} + + mlir::LogicalResult + matchAndRewrite(mlir::Operation *_cvtOp, + mlir::PatternRewriter &rewriter) const override { + auto cvt = cast(_cvtOp); + auto forOp = 
dyn_cast(cvt->getParentOp()); + if (!forOp) + return mlir::failure(); + auto isInLoop = [&](Operation *op) { return op->getParentOp() == forOp; }; + + SetVector cvtSlices; + auto filter = [&](Operation *op) { + return isInLoop(op) && + !isa(op) && + !isa(op) && !isa(op) && + !isa(op); + }; + mlir::getForwardSlice(cvt.getResult(), &cvtSlices, filter); + if (cvtSlices.empty()) + return failure(); + + for (Operation *op : cvtSlices) { + if (!op->hasTrait() && + !op->hasTrait()) + return failure(); + for (Value arg : op->getOperands()) { + Operation *argOp = arg.getDefiningOp(); + if (argOp && (argOp != cvt) && + !isa(argOp)) { + return failure(); + } + } + } + + // otherwise, we push the conversion forward + // since we'll be able to move it out of + // the loop once it reaches the yield op + // op(cvt(arg_0), arg_1, ..., arg_n) + // -> cvt(op(arg_0, cvt(arg_1), ..., cvt(arg_n))) + BlockAndValueMapping mapping; + auto op = cvtSlices.front(); + for (Value arg : op->getOperands()) { + if (arg.getDefiningOp() == cvt) + mapping.map(arg, cvt.getOperand()); + else { + auto cvtI = rewriter.create( + arg.getLoc(), cvt.getOperand().getType(), arg); + mapping.map(arg, cvtI); + } + } + Operation *newOp = rewriter.clone(*op, mapping); + newOp->getResult(0).setType(cvt.getOperand().getType()); + auto newCvt = rewriter.create( + newOp->getLoc(), cvt.getResult().getType(), newOp->getResult(0)); + rewriter.replaceOp(op, newCvt->getResults()); + return success(); + } +}; + +// ----------------------------------------------------------------------------- +// +// ----------------------------------------------------------------------------- +namespace { +int computeCapabilityToMMAVersion(int computeCapability) { + if (computeCapability < 70) { + return 0; + } else if (computeCapability < 80) { + return 1; + } else if (computeCapability < 90) { + return 2; + } else { + assert(false && "computeCapability > 90 not supported"); + return 3; + } +} + +SmallVector mmaVersionToShapePerWarp(int version) { + if (version == 1) + return {16, 16}; + else if (version == 2) + return {16, 8}; + else { + assert(false && "version not supported"); + return {0, 0}; + } +} + +SmallVector warpsPerTileV1(const ArrayRef shape, + int numWarps) { + SmallVector ret = {1, 1}; + SmallVector shapePerWarp = + mmaVersionToShapePerWarp(1 /*version*/); + bool changed = false; + do { + changed = false; + int pre = ret[0]; + if (ret[0] * ret[1] < numWarps) { + ret[0] = std::clamp(ret[0] * 2, 1, shape[0] / shapePerWarp[0]); + changed = pre != ret[0]; + } + if (ret[0] * ret[1] < numWarps) { + pre = ret[1]; + ret[1] = std::clamp(ret[1] * 2, 1, shape[1] / shapePerWarp[1]); + changed = pre != ret[1]; + } + } while (changed); + return ret; +} + +SmallVector warpsPerTileV2(triton::DotOp dotOp, + const ArrayRef shape, + int numWarps) { + SetVector slices; + mlir::getForwardSlice(dotOp.getResult(), &slices); + if (llvm::find_if(slices, [](Operation *op) { + return isa(op); + }) != slices.end()) + return {(unsigned)numWarps, 1}; + + SmallVector ret = {1, 1}; + SmallVector shapePerWarp = {16, 8}; + bool changed = false; + // TODO (@daadaada): double-check. + // original logic in + // https://github.com/openai/triton/blob/master/lib/codegen/analysis/layout.cc#L252 + // seems buggy for shape = [32, 16] ? 
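+  // Worked trace (illustrative): with shape = {128, 128}, numWarps = 8 and
+  // shapePerWarp = {16, 8}, the loop below grows ret = {1, 1} to {2, 1},
+  // then {2, 2}, then {4, 2}, at which point ret[0] * ret[1] == numWarps and
+  // it stops; i.e. the 8 warps tile the 128x128 output as a 4x2 grid.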
+ do { + changed = false; + if (ret[0] * ret[1] >= numWarps) + break; + if (shape[0] / shapePerWarp[0] / ret[0] >= + shape[1] / (shapePerWarp[1] * 2) / ret[1]) { + if (ret[0] < shape[0] / shapePerWarp[0]) { + ret[0] *= 2; + } else + ret[1] *= 2; + } else { + ret[1] *= 2; + } + } while (true); + return ret; +} + +} // namespace + +class OptimizeBlockedToShared : public mlir::RewritePattern { +public: + explicit OptimizeBlockedToShared(mlir::MLIRContext *context) + : RewritePattern(triton::gpu::ConvertLayoutOp::getOperationName(), 1, + context) {} + + mlir::LogicalResult + matchAndRewrite(mlir::Operation *op, + mlir::PatternRewriter &rewriter) const override { + auto cvt = cast(op); + auto srcType = cvt.getOperand().getType().cast(); + auto dstType = cvt.getResult().getType().cast(); + auto srcBlockedLayout = + srcType.getEncoding().dyn_cast(); + auto dstSharedLayout = + dstType.getEncoding().dyn_cast(); + if (!srcBlockedLayout || !dstSharedLayout) + return failure(); + if (srcBlockedLayout.getOrder() == dstSharedLayout.getOrder()) + return failure(); + // For now only works if single use is transpose + // TODO: rematerialize #shared uses + auto users = op->getUsers(); + if (std::distance(users.begin(), users.end()) != 1 || + !isa(*users.begin())) + return failure(); + + auto tmpShared = triton::gpu::SharedEncodingAttr::get( + op->getContext(), dstSharedLayout.getVec(), + dstSharedLayout.getPerPhase(), dstSharedLayout.getMaxPhase(), + srcBlockedLayout.getOrder()); + auto tmpType = RankedTensorType::get(srcType.getShape(), + srcType.getElementType(), tmpShared); + auto tmpCvt = rewriter.create( + op->getLoc(), tmpType, cvt.getOperand()); + + auto newDstType = RankedTensorType::get( + users.begin()->getResultTypes()[0].cast().getShape(), + srcType.getElementType(), dstSharedLayout); + + auto newTrans = rewriter.create(op->getLoc(), newDstType, + tmpCvt.getResult()); + + rewriter.replaceOp(*users.begin(), newTrans.getResult()); + return success(); + } +}; + +class OptimizeConvertToDotOperand : public mlir::RewritePattern { +public: + explicit OptimizeConvertToDotOperand(mlir::MLIRContext *context) + : RewritePattern(triton::gpu::ConvertLayoutOp::getOperationName(), 1, + context) {} + + mlir::LogicalResult + matchAndRewrite(mlir::Operation *op, + mlir::PatternRewriter &rewriter) const override { + auto cvt = cast(op); + auto srcType = cvt.getOperand().getType().cast(); + auto dstType = cvt.getResult().getType().cast(); + // order + ArrayRef order; + if (auto srcBlockedLayout = + srcType.getEncoding().dyn_cast()) + order = srcBlockedLayout.getOrder(); + else if (auto srcSharedLayout = + srcType.getEncoding() + .dyn_cast()) + order = srcSharedLayout.getOrder(); + else + return failure(); + // dot operand output + auto dstDotOperandLayout = + dstType.getEncoding().dyn_cast(); + if (!dstDotOperandLayout) + return failure(); + if (!dstDotOperandLayout.getIsMMAv1Row()) + return failure(); + bool isMMAv1Row = + dstDotOperandLayout.getIsMMAv1Row().cast().getValue(); + if ((order[0] == 1 && isMMAv1Row) || (order[0] == 0 && !isMMAv1Row)) + return failure(); + auto newIsRow = BoolAttr::get(op->getContext(), !isMMAv1Row); + auto newDstEncoding = triton::gpu::DotOperandEncodingAttr::get( + op->getContext(), dstDotOperandLayout.getOpIdx(), + dstDotOperandLayout.getParent(), newIsRow); + auto newDstType = RankedTensorType::get( + dstType.getShape(), dstType.getElementType(), newDstEncoding); + auto newCvt = rewriter.create( + op->getLoc(), newDstType, cvt.getOperand()); + rewriter.replaceOp(op, 
newCvt.getResult()); + return success(); + } +}; + +class BlockedToMMA : public mlir::RewritePattern { + int computeCapability; + +public: + BlockedToMMA(mlir::MLIRContext *context, int computeCapability) + : mlir::RewritePattern(triton::DotOp::getOperationName(), 2, context), + computeCapability(computeCapability) {} + + static SmallVector getWarpsPerTile(triton::DotOp dotOp, + const ArrayRef shape, + int version, int numWarps) { + switch (version) { + case 1: + return warpsPerTileV1(shape, numWarps); + case 2: + return warpsPerTileV2(dotOp, shape, numWarps); + default: + assert(false && "not supported version"); + return {0, 0}; + } + } + + mlir::LogicalResult + matchAndRewrite(mlir::Operation *op, + mlir::PatternRewriter &rewriter) const override { + auto dotOp = cast(op); + // TODO: Check data-types and SM compatibility + auto oldRetType = dotOp.getResult().getType().cast(); + if (oldRetType.getEncoding().isa()) + return failure(); + + auto AType = dotOp.getOperand(0).getType().cast(); + auto BType = dotOp.getOperand(1).getType().cast(); + + // for FMA, should retain the blocked layout. + int versionMajor = computeCapabilityToMMAVersion(computeCapability); + if (!supportMMA(dotOp, versionMajor)) + return failure(); + + auto AOrder = AType.getEncoding() + .cast() + .getParent() + .cast() + .getOrder(); + auto BOrder = BType.getEncoding() + .cast() + .getParent() + .cast() + .getOrder(); + + // get MMA encoding for the given number of warps + auto retShape = oldRetType.getShape(); + auto mod = op->getParentOfType(); + int numWarps = triton::gpu::TritonGPUDialect::getNumWarps(mod); + + auto warpsPerTile = + getWarpsPerTile(dotOp, retShape, versionMajor, numWarps); + triton::gpu::MmaEncodingAttr mmaEnc; + if (versionMajor == 1) { + auto shapeA = AType.getShape(); + auto shapeB = BType.getShape(); + bool isARow = AOrder[0] != 0; + bool isBRow = BOrder[0] != 0; + mmaEnc = triton::gpu::MmaEncodingAttr::get( + oldRetType.getContext(), versionMajor, warpsPerTile, shapeA, shapeB, + isARow, isBRow); + } else if (versionMajor == 2) { + mmaEnc = triton::gpu::MmaEncodingAttr::get( + oldRetType.getContext(), versionMajor, 0 /*versionMinor*/, + warpsPerTile); + } else { + assert(false && "Mma layout only support versionMajor of 1 or 2"); + } + auto newRetType = + RankedTensorType::get(retShape, oldRetType.getElementType(), mmaEnc); + + // convert accumulator + auto oldAcc = dotOp.getOperand(2); + auto newAcc = rewriter.create( + oldAcc.getLoc(), newRetType, oldAcc); + Value a = dotOp.a(); + Value b = dotOp.b(); + auto oldAType = a.getType().cast(); + auto oldBType = b.getType().cast(); + auto oldAOrder = oldAType.getEncoding() + .cast() + .getParent() + .cast() + .getOrder(); + auto oldBOrder = oldBType.getEncoding() + .cast() + .getParent() + .cast() + .getOrder(); + Attribute isMMAv1RowA; + Attribute isMMAv1RowB; + if (versionMajor == 1) { + isMMAv1RowA = BoolAttr::get(getContext(), oldAOrder[0] == 1); + isMMAv1RowB = BoolAttr::get(getContext(), oldBOrder[0] == 1); + } + + auto newAType = RankedTensorType::get( + oldAType.getShape(), oldAType.getElementType(), + triton::gpu::DotOperandEncodingAttr::get( + oldAType.getContext(), 0, newRetType.getEncoding(), isMMAv1RowA)); + auto newBType = RankedTensorType::get( + oldBType.getShape(), oldBType.getElementType(), + triton::gpu::DotOperandEncodingAttr::get( + oldBType.getContext(), 1, newRetType.getEncoding(), isMMAv1RowB)); + + a = rewriter.create(a.getLoc(), newAType, a); + b = rewriter.create(b.getLoc(), newBType, b); + auto newDot = 
rewriter.create(dotOp.getLoc(), newRetType, a, + b, newAcc, dotOp.allowTF32()); + + rewriter.replaceOpWithNewOp( + op, oldRetType, newDot.getResult()); + return success(); + } +}; + +class FixupLoop : public mlir::RewritePattern { + +public: + explicit FixupLoop(mlir::MLIRContext *context) + : mlir::RewritePattern(scf::ForOp::getOperationName(), 2, context) {} + + mlir::LogicalResult + matchAndRewrite(mlir::Operation *op, + mlir::PatternRewriter &rewriter) const override { + auto forOp = cast(op); + + // Rewrite init argument + SmallVector newInitArgs = forOp.getInitArgs(); + bool shouldRematerialize = false; + for (size_t i = 0; i < newInitArgs.size(); i++) { + auto initArg = newInitArgs[i]; + auto regionArg = forOp.getRegionIterArgs()[i]; + if (newInitArgs[i].getType() != forOp.getRegionIterArgs()[i].getType()) { + shouldRematerialize = true; + break; + } + } + if (!shouldRematerialize) + return failure(); + + scf::ForOp newForOp = rewriter.create( + forOp.getLoc(), forOp.getLowerBound(), forOp.getUpperBound(), + forOp.getStep(), newInitArgs); + newForOp->moveBefore(forOp); + rewriter.setInsertionPointToStart(newForOp.getBody()); + BlockAndValueMapping mapping; + for (const auto &arg : llvm::enumerate(forOp.getRegionIterArgs())) + mapping.map(arg.value(), newForOp.getRegionIterArgs()[arg.index()]); + + for (Operation &op : forOp.getBody()->getOperations()) { + Operation *newOp = rewriter.clone(op, mapping); + } + rewriter.replaceOp(forOp, newForOp.getResults()); + return success(); + } +}; + +} // namespace + +#define GEN_PASS_CLASSES +#include "triton/Dialect/TritonGPU/Transforms/Passes.h.inc" + +class TritonGPUCombineOpsPass + : public TritonGPUCombineOpsBase { +public: + TritonGPUCombineOpsPass() = default; + TritonGPUCombineOpsPass(int computeCapability) { + this->computeCapability = computeCapability; + } + void runOnOperation() override { + MLIRContext *context = &getContext(); + ModuleOp m = getOperation(); + + mlir::RewritePatternSet patterns(context); + + patterns.add(context); + patterns.add(context); + patterns.add(context); + patterns.add(context); + patterns.add(context); + patterns.add(context); + patterns.add(context); + patterns.add(context); + patterns.add(context); + patterns.add(context); + patterns.add(context, computeCapability); + + if (applyPatternsAndFoldGreedily(m, std::move(patterns)).failed()) { + signalPassFailure(); + } + + mlir::RewritePatternSet loopFixup(context); + loopFixup.add(context); + if (applyPatternsAndFoldGreedily(m, std::move(loopFixup)).failed()) { + signalPassFailure(); + } + } +}; + +std::unique_ptr +mlir::createTritonGPUCombineOpsPass(int computeCapability) { + return std::make_unique(computeCapability); +} diff --git a/lib/Dialect/TritonGPU/Transforms/Combine.td b/lib/Dialect/TritonGPU/Transforms/Combine.td new file mode 100644 index 000000000000..6bf1b14866df --- /dev/null +++ b/lib/Dialect/TritonGPU/Transforms/Combine.td @@ -0,0 +1,7 @@ +#ifndef TRITONGPU_PATTERNS +#define TRITONGPU_PATTERNS + +include "triton/Dialect/TritonGPU/IR/TritonGPUOps.td" +include "triton/Dialect/Triton/IR/TritonOps.td" + +#endif diff --git a/lib/Dialect/TritonGPU/Transforms/Pipeline.cpp b/lib/Dialect/TritonGPU/Transforms/Pipeline.cpp new file mode 100644 index 000000000000..dd5aa9d2a312 --- /dev/null +++ b/lib/Dialect/TritonGPU/Transforms/Pipeline.cpp @@ -0,0 +1,656 @@ +#include "mlir/Dialect/Tensor/IR/Tensor.h" +#include "mlir/IR/BlockAndValueMapping.h" +#include "triton/Dialect/TritonGPU/IR/Dialect.h" +#include "triton/Dialect/TritonGPU/Transforms/Passes.h" + 
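+// Illustrative sketch of the loop software pipelining implemented in this
+// file (simplified; see the overview comment below): for numStages = N, a
+// loop whose tt.load results feed tt.dot operands through a layout conversion
+// is rewritten so that the first N - 1 loads are issued in a prologue as
+// asynchronous copies into an N-slice shared-memory buffer
+// (insert_slice_async + async_wait + extract_slice), while the loop body
+// issues the asynchronous copy for a later iteration and computes on a slice
+// that is already resident in shared memory. The buffers, the extracted
+// slices and a few iteration indices are carried as extra loop arguments.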
+//===----------------------------------------------------------------------===// +// +// This file implements loop software pipelining +// The implementation here is inspired by the pipeline pass in Triton (-v2.0) +// and SCF's LoopPipelining. +// +//===----------------------------------------------------------------------===// + +using namespace mlir; +namespace ttg = triton::gpu; + +#define GEN_PASS_CLASSES +#include "triton/Dialect/TritonGPU/Transforms/Passes.h.inc" + +static Type getI1SameShape(Value v) { + Type vType = v.getType(); + auto i1Type = IntegerType::get(vType.getContext(), 1); + auto tensorType = vType.cast(); + return RankedTensorType::get(tensorType.getShape(), i1Type, + tensorType.getEncoding()); +} + +#define int_attr(num) builder.getI64IntegerAttr(num) + +namespace { + +class LoopPipeliner { + /// Cache forOp we are working on + scf::ForOp forOp; + + /// Cache YieldOp for this forOp + scf::YieldOp yieldOp; + + /// Loads to be pipelined + SetVector loads; + /// The value that each load will be mapped to (after layout conversion) + DenseMap loadsMapping; + /// load => buffer + DenseMap loadsBuffer; + /// load => buffer type (with shared layout after swizzling) + DenseMap loadsBufferType; + /// load => buffer at stage N + DenseMap> loadStageBuffer; + /// load => after extract + DenseMap loadsExtract; + /// + Value pipelineIterIdx; + /// + Value loopIterIdx; + + /// Comments on numStages: + /// [0, numStages-1) are in the prologue + /// numStages-1 is appended after the loop body + int numStages; + + /// value (in loop) => value at stage N + DenseMap> valueMapping; + + /// Block arguments that loads depend on + DenseSet depArgs; + + /// Operations (inside the loop body) that loads depend on + DenseSet depOps; + + /// collect values that v depends on and are defined inside the loop + void collectDeps(Value v, int stages, DenseSet &deps); + + void setValueMapping(Value origin, Value newValue, int stage); + + Value lookupOrDefault(Value origin, int stage); + + /// Returns a empty buffer of size + ttg::AllocTensorOp allocateEmptyBuffer(Operation *op, OpBuilder &builder); + +public: + LoopPipeliner(scf::ForOp forOp, int numStages) + : forOp(forOp), numStages(numStages) { + // cache yieldOp + yieldOp = cast(forOp.getBody()->getTerminator()); + } + + /// Collect loads to pipeline. 
Return success if we can pipeline this loop + LogicalResult initialize(); + + /// Emit pipelined loads (before loop body) + void emitPrologue(); + + /// emit pipelined loads (after loop body) + void emitEpilogue(); + + /// create the new ForOp (add new args & insert prefetched ops) + scf::ForOp createNewForOp(); + + friend struct PipelinePass; +}; + +// helpers +void LoopPipeliner::setValueMapping(Value origin, Value newValue, int stage) { + if (valueMapping.find(origin) == valueMapping.end()) + valueMapping[origin] = SmallVector(numStages); + valueMapping[origin][stage] = newValue; +} + +Value LoopPipeliner::lookupOrDefault(Value origin, int stage) { + if (valueMapping.find(origin) == valueMapping.end()) + return origin; + return valueMapping[origin][stage]; +} + +void LoopPipeliner::collectDeps(Value v, int stages, DenseSet &deps) { + // Loop-invariant value, skip + if (v.getParentRegion() != &forOp.getLoopBody()) + return; + + // Since we only need to peel the loop numStages-1 times, don't worry about + // depends that are too far away + if (stages < 0) + return; + + if (auto arg = v.dyn_cast()) { + if (arg.getArgNumber() > 0) { + // Skip the first arg (loop induction variable) + // Otherwise the op idx is arg.getArgNumber()-1 + deps.insert(v); + collectDeps(yieldOp->getOperand(arg.getArgNumber() - 1), stages - 1, + deps); + } + } else { // value + // v might be in deps, but we still need to visit v. + // This is because v might depend on value in previous iterations + deps.insert(v); + for (Value op : v.getDefiningOp()->getOperands()) + collectDeps(op, stages, deps); + } +} + +ttg::AllocTensorOp LoopPipeliner::allocateEmptyBuffer(Operation *op, + OpBuilder &builder) { + // Allocate a buffer for each pipelined tensor + // shape: e.g. (numStages==4), <32x64xbf16> -> <4x32x64xbf16> + Value convertLayout = loadsMapping[op->getResult(0)]; + if (auto tensorType = convertLayout.getType().dyn_cast()) { + return builder.create( + convertLayout.getLoc(), loadsBufferType[op->getResult(0)]); + } + llvm_unreachable("Async copy's return should be of RankedTensorType"); +} + +/// A load instruction can be pipelined if: +/// - the load doesn't depend on any other loads (after loop peeling) +/// - (?) this load is not a loop-invariant value (we should run LICM before +/// this pass?) +LogicalResult LoopPipeliner::initialize() { + Block *loop = forOp.getBody(); + + // can we use forOp.walk(...) here? + SmallVector allLoads; + for (Operation &op : *loop) + if (auto loadOp = dyn_cast(&op)) + allLoads.push_back(loadOp); + + // Early stop: no need to continue if there is no load in the loop. 
+ if (allLoads.empty()) + return failure(); + + // load => values that it depends on + DenseMap> loadDeps; + for (triton::LoadOp loadOp : allLoads) { + DenseSet deps; + for (Value op : loadOp->getOperands()) + collectDeps(op, numStages - 1, deps); + loadDeps[loadOp] = deps; + } + + // Don't pipeline loads that depend on other loads + // (Because if a load depends on another load, this load needs to wait on the + // other load in the prologue, which is against the point of the pipeline + // pass) + for (triton::LoadOp loadOp : allLoads) { + bool isCandidate = true; + for (triton::LoadOp other : allLoads) { + if (loadDeps[loadOp].contains(other)) { + isCandidate = false; + break; + } + } + + // We only pipeline loads that have one covert_layout (to dot_op) use + // TODO: lift this constraint in the future + if (isCandidate && loadOp.getResult().hasOneUse()) { + isCandidate = false; + Operation *use = *loadOp.getResult().getUsers().begin(); + if (auto convertLayout = llvm::dyn_cast(use)) { + if (auto tensorType = convertLayout.getResult() + .getType() + .dyn_cast()) { + if (auto dotOpEnc = tensorType.getEncoding() + .dyn_cast()) { + isCandidate = true; + loadsMapping[loadOp] = convertLayout; + auto ty = loadOp.getType().cast(); + SmallVector bufferShape(ty.getShape().begin(), + ty.getShape().end()); + bufferShape.insert(bufferShape.begin(), numStages); + auto sharedEnc = ttg::SharedEncodingAttr::get( + ty.getContext(), dotOpEnc, ty.getShape(), + triton::gpu::getOrder(ty.getEncoding()), ty.getElementType()); + loadsBufferType[loadOp] = RankedTensorType::get( + bufferShape, ty.getElementType(), sharedEnc); + } + } + } + } else + isCandidate = false; + + if (isCandidate) + loads.insert(loadOp); + } + + // We have some loads to pipeline + if (!loads.empty()) { + // Update depArgs & depOps + for (Value loadOp : loads) { + for (Value dep : loadDeps[loadOp]) { + // TODO: we should record the stage that the value is depended on + if (auto arg = dep.dyn_cast()) + depArgs.insert(arg); + else + depOps.insert(dep.getDefiningOp()); + } + } + return success(); + } + + return failure(); +} + +void LoopPipeliner::emitPrologue() { + // llvm::errs() << "loads to pipeline...:\n"; + // for (Value load : loads) + // llvm::errs() << load << "\n"; + + OpBuilder builder(forOp); + for (BlockArgument &arg : forOp.getRegionIterArgs()) { + OpOperand &operand = forOp.getOpOperandForRegionIterArg(arg); + setValueMapping(arg, operand.get(), 0); + } + + // prologue from [0, numStage-1) + Value iv = forOp.getLowerBound(); + pipelineIterIdx = builder.create(iv.getLoc(), 0, 32); + for (int stage = 0; stage < numStages - 1; ++stage) { + // Special handling for induction variable as the increment is implicit + if (stage != 0) + iv = builder.create(iv.getLoc(), iv, forOp.getStep()); + setValueMapping(forOp.getInductionVar(), iv, stage); + + // Special handling for loop condition as there is no condition in ForOp + Value loopCond = builder.create( + iv.getLoc(), arith::CmpIPredicate::slt, iv, forOp.getUpperBound()); + + // Rematerialize peeled values + SmallVector orderedDeps; + for (Operation &op : forOp.getLoopBody().front()) { + if (depOps.contains(&op)) + orderedDeps.push_back(&op); + else if (loads.contains(op.getResult(0))) + orderedDeps.push_back(&op); + } + assert(depOps.size() + loads.size() == orderedDeps.size() && + "depOps contains invalid values"); + for (Operation *op : orderedDeps) { + Operation *newOp = nullptr; + if (loads.contains(op->getResult(0))) { + // Allocate empty buffer + if (stage == 0) { + 
loadsBuffer[op->getResult(0)] = allocateEmptyBuffer(op, builder); + loadStageBuffer[op->getResult(0)] = {loadsBuffer[op->getResult(0)]}; + } + // load => copy async + if (auto loadOp = llvm::dyn_cast(op)) { + Value mask = lookupOrDefault(loadOp.mask(), stage); + Value newMask; + if (mask) { + Value splatCond = builder.create( + mask.getLoc(), mask.getType(), loopCond); + newMask = + builder.create(mask.getLoc(), mask, splatCond); + } else { + newMask = builder.create( + loopCond.getLoc(), getI1SameShape(loadOp), loopCond); + } + // TODO: check if the hardware supports async copy + newOp = builder.create( + op->getLoc(), loadsBuffer[loadOp].getType(), + lookupOrDefault(loadOp.ptr(), stage), + loadStageBuffer[loadOp][stage], pipelineIterIdx, newMask, + lookupOrDefault(loadOp.other(), stage), loadOp.cache(), + loadOp.evict(), loadOp.isVolatile(), /*axis*/ 0); + loadStageBuffer[loadOp].push_back(newOp->getResult(0)); + } else + llvm_unreachable("This should be LoadOp"); + } else { + newOp = builder.clone(*op); + // Update loop-carried uses + for (unsigned opIdx = 0; opIdx < op->getNumOperands(); ++opIdx) { + auto it = valueMapping.find(op->getOperand(opIdx)); + if (it != valueMapping.end()) { + Value v = it->second[stage]; + assert(v); + newOp->setOperand(opIdx, v); + } // else, op at opIdx is a loop-invariant value + } + } + + // Update mapping of results + for (unsigned dstIdx : llvm::seq(unsigned(0), op->getNumResults())) { + Value originalResult = op->getResult(dstIdx); + // copy_async will update the value of its only use + // TODO: load should not be used in the preheader? + if (loads.contains(originalResult)) { + break; + // originalResult = loadsMapping[originalResult]; + } + setValueMapping(originalResult, newOp->getResult(dstIdx), stage); + // update mapping for loop-carried values (args) + for (OpOperand &operand : yieldOp->getOpOperands()) { + if (operand.get() == op->getResult(dstIdx)) + setValueMapping( + forOp.getRegionIterArgs()[operand.getOperandNumber()], + newOp->getResult(dstIdx), stage + 1); + } + } + } // for (Operation *op : orderedDeps) + + pipelineIterIdx = builder.create( + iv.getLoc(), pipelineIterIdx, + builder.create(iv.getLoc(), 1, 32)); + } // for (int stage = 0; stage < numStages - 1; ++stage) + + // async.wait & extract_slice + builder.create(loads[0].getLoc(), + loads.size() * (numStages - 2)); + loopIterIdx = builder.create(iv.getLoc(), 0, 32); + for (Value loadOp : loads) { + auto sliceType = loadsMapping[loadOp].getType().cast(); + sliceType = + RankedTensorType::get(sliceType.getShape(), sliceType.getElementType(), + loadsBufferType[loadOp].getEncoding()); + Value extractSlice = builder.create( + loadOp.getLoc(), sliceType, loadStageBuffer[loadOp][numStages - 1], + SmallVector{int_attr(0), int_attr(0), int_attr(0)}, + SmallVector{int_attr(1), + int_attr(sliceType.getShape()[0]), + int_attr(sliceType.getShape()[1])}, + SmallVector{int_attr(1), int_attr(1), int_attr(1)}); + loadsExtract[loadOp] = extractSlice; + } + // Bump up loopIterIdx, this is used for getting the correct slice for the + // *next* iteration + loopIterIdx = builder.create( + loopIterIdx.getLoc(), loopIterIdx, + builder.create(loopIterIdx.getLoc(), 1, 32)); +} + +void LoopPipeliner::emitEpilogue() { + // If there's any outstanding async copies, we need to wait for them. 
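+  // The wait emitted below uses a pending-count of 0, which (in the spirit of
+  // PTX cp.async.wait_group) means "block until every outstanding async copy
+  // group has completed", so nothing issued by the prologue or the loop body
+  // can still be in flight when the loop's results are consumed.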
+ OpBuilder builder(forOp); + OpBuilder::InsertionGuard g(builder); + builder.setInsertionPointAfter(forOp); + builder.create(forOp.getLoc(), 0); +} + +scf::ForOp LoopPipeliner::createNewForOp() { + OpBuilder builder(forOp); + + // Order of new args: + // (original args) + // (insertSliceAsync buffer at stage numStages - 1) for each load + // (extracted tensor) for each load + // (depArgs at stage numStages - 1) + // (iv at stage numStages - 2) + // (pipeline iteration index) + // (loop iteration index) + SmallVector newLoopArgs; + // We need this to update operands for yield + // original block arg => new arg's idx + DenseMap depArgsIdx; + for (auto v : forOp.getIterOperands()) + newLoopArgs.push_back(v); + + size_t bufferIdx = newLoopArgs.size(); + for (Value loadOp : loads) + newLoopArgs.push_back(loadStageBuffer[loadOp].back()); + size_t loadIdx = newLoopArgs.size(); + for (Value loadOp : loads) + newLoopArgs.push_back(loadsExtract[loadOp]); + + size_t depArgsBeginIdx = newLoopArgs.size(); + for (BlockArgument depArg : depArgs) { + depArgsIdx[depArg] = newLoopArgs.size(); + newLoopArgs.push_back(valueMapping[depArg][numStages - 1]); + } + + size_t nextIVIdx = newLoopArgs.size(); + newLoopArgs.push_back(valueMapping[forOp.getInductionVar()][numStages - 2]); + newLoopArgs.push_back(pipelineIterIdx); + newLoopArgs.push_back(loopIterIdx); + + for (size_t i = 0; i < newLoopArgs.size(); ++i) + assert(newLoopArgs[i]); + + // 1. signature of the new ForOp + auto newForOp = builder.create( + forOp.getLoc(), forOp.getLowerBound(), forOp.getUpperBound(), + forOp.getStep(), newLoopArgs); + + // 2. body of the new ForOp + builder.setInsertionPointToStart(newForOp.getBody()); + BlockAndValueMapping mapping; + for (const auto &arg : llvm::enumerate(forOp.getRegionIterArgs())) + mapping.map(arg.value(), newForOp.getRegionIterArgs()[arg.index()]); + mapping.map(forOp.getInductionVar(), newForOp.getInductionVar()); + + // 2.1 clone the loop body, replace original args with args of the new ForOp + // Insert async wait if necessary. + for (Operation &op : forOp.getBody()->without_terminator()) { + Operation *newOp = builder.clone(op, mapping); + // update mapping of results + for (unsigned dstIdx : llvm::seq(unsigned(0), op.getNumResults())) + mapping.map(op.getResult(dstIdx), newOp->getResult(dstIdx)); + } + + // 3. replace loads with block args (from prologue) + for (size_t idx = 0; idx < loads.size(); ++idx) { + Value load = loads[idx]; + assert(load.hasOneUse() && + "we assume that this load has one use (ConvertLayout)"); + Value loadUse = load.getUsers().begin()->getResult(0); + mapping.lookup(loadUse).replaceAllUsesWith( + newForOp.getRegionIterArgs()[loadIdx + idx]); + // delete old load and layout conversion + mapping.lookup(loadUse).getDefiningOp()->erase(); + mapping.lookup(load).getDefiningOp()->erase(); + } + + // 4. 
prefetch the next iteration + SmallVector orderedDeps; + for (Operation &op : forOp.getLoopBody().front()) { + if (depOps.contains(&op)) + orderedDeps.push_back(&op); + else if (loads.contains(op.getResult(0))) + orderedDeps.push_back(&op); + } + assert(depOps.size() + loads.size() == orderedDeps.size() && + "depOps contains invalid values"); + BlockAndValueMapping nextMapping; + DenseMap depArgsMapping; + size_t argIdx = 0; + for (BlockArgument arg : depArgs) { + nextMapping.map(arg, + newForOp.getRegionIterArgs()[argIdx + depArgsBeginIdx]); + ++argIdx; + } + // Special handling for iv & loop condition + Value nextIV = builder.create( + newForOp.getInductionVar().getLoc(), + newForOp.getRegionIterArgs()[nextIVIdx], newForOp.getStep()); + Value nextLoopCond = + builder.create(nextIV.getLoc(), arith::CmpIPredicate::slt, + nextIV, newForOp.getUpperBound()); + nextMapping.map(forOp.getInductionVar(), nextIV); + + // Slice index + SmallVector nextBuffers; + SmallVector extractSlices; + + pipelineIterIdx = newForOp.getRegionIterArgs()[nextIVIdx + 1]; + Value insertSliceIndex = builder.create( + nextIV.getLoc(), pipelineIterIdx, + builder.create(nextIV.getLoc(), numStages, 32)); + loopIterIdx = newForOp.getRegionIterArgs()[nextIVIdx + 2]; + Value extractSliceIndex = builder.create( + nextIV.getLoc(), loopIterIdx, + builder.create(nextIV.getLoc(), numStages, 32)); + extractSliceIndex = builder.create( + extractSliceIndex.getLoc(), builder.getIndexType(), extractSliceIndex); + + for (Operation *op : orderedDeps) { + Operation *nextOp = nullptr; + // Update loading mask + if (loads.contains(op->getResult(0))) { + auto loadOp = llvm::cast(op); + Value mask = loadOp.mask(); + Value newMask; + if (mask) { + Value splatCond = builder.create( + mask.getLoc(), mask.getType(), nextLoopCond); + newMask = builder.create( + mask.getLoc(), splatCond, nextMapping.lookupOrDefault(mask)); + // If mask is defined outside the loop, don't update the map more than + // once + if (!(forOp.isDefinedOutsideOfLoop(mask) && nextMapping.contains(mask))) + nextMapping.map(mask, newMask); + newMask = nextMapping.lookupOrDefault(loadOp.mask()); + } else + newMask = builder.create( + loadOp.getLoc(), getI1SameShape(loadOp), nextLoopCond); + Value insertAsyncOp = builder.create( + op->getLoc(), loadsBuffer[loadOp].getType(), + nextMapping.lookupOrDefault(loadOp.ptr()), + newForOp.getRegionIterArgs()[bufferIdx + nextBuffers.size()], + insertSliceIndex, newMask, + nextMapping.lookupOrDefault(loadOp.other()), loadOp.cache(), + loadOp.evict(), loadOp.isVolatile(), /*axis*/ 0); + nextBuffers.push_back(insertAsyncOp); + auto sliceType = loadsMapping[loadOp].getType().cast(); + sliceType = RankedTensorType::get(sliceType.getShape(), + sliceType.getElementType(), + loadsBufferType[loadOp].getEncoding()); + nextOp = builder.create( + op->getLoc(), sliceType, insertAsyncOp, + SmallVector{extractSliceIndex, int_attr(0), + int_attr(0)}, + SmallVector{int_attr(1), + int_attr(sliceType.getShape()[0]), + int_attr(sliceType.getShape()[1])}, + SmallVector{int_attr(1), int_attr(1), int_attr(1)}); + extractSlices.push_back(nextOp->getResult(0)); + } else + nextOp = builder.clone(*op, nextMapping); + // Update mapping of results + for (unsigned dstIdx : llvm::seq(unsigned(0), op->getNumResults())) { + nextMapping.map(op->getResult(dstIdx), nextOp->getResult(dstIdx)); + // If this is a loop-carried value, update the mapping for yield + auto originYield = cast(forOp.getBody()->getTerminator()); + for (OpOperand &operand : 
originYield->getOpOperands()) { + if (operand.get() == op->getResult(dstIdx)) { + size_t originIdx = operand.getOperandNumber(); + size_t newArgIdx = depArgsIdx[forOp.getRegionIterArgs()[originIdx]]; + BlockArgument newArg = newForOp.getRegionIterArgs()[newArgIdx]; + depArgsMapping[newArg] = nextOp->getResult(dstIdx); + } + } + } + } + + { + OpBuilder::InsertionGuard guard(builder); + for (Operation &op : *newForOp.getBody()) { + if (auto dotOp = llvm::dyn_cast(&op)) { + builder.setInsertionPoint(&op); + auto dotType = dotOp.getType().cast(); + Value a = dotOp.a(); + Value b = dotOp.b(); + auto layoutCast = [&](Value dotOperand, int opIdx) -> Value { + auto tensorType = dotOperand.getType().cast(); + if (!tensorType.getEncoding().isa()) { + auto newEncoding = ttg::DotOperandEncodingAttr::get( + tensorType.getContext(), opIdx, dotType.getEncoding()); + auto newType = + RankedTensorType::get(tensorType.getShape(), + tensorType.getElementType(), newEncoding); + return builder.create(dotOperand.getLoc(), + newType, dotOperand); + } + return dotOperand; + }; + a = layoutCast(a, 0); + b = layoutCast(b, 1); + dotOp->setOperand(0, a); + dotOp->setOperand(1, b); + } + } + } + + // async.wait & extract_slice + Operation *asyncWait = builder.create( + loads[0].getLoc(), loads.size() * (numStages - 2)); + for (auto it = extractSlices.rbegin(); it != extractSlices.rend(); ++it) { + // move extract_slice after asyncWait + it->getDefiningOp()->moveAfter(asyncWait); + } + + // Bump iteration count + pipelineIterIdx = builder.create( + nextIV.getLoc(), pipelineIterIdx, + builder.create(nextIV.getLoc(), 1, 32)); + loopIterIdx = builder.create( + nextIV.getLoc(), loopIterIdx, + builder.create(nextIV.getLoc(), 1, 32)); + + // Finally, the YieldOp, need to sync with the order of newLoopArgs + SmallVector yieldValues; + for (Value v : forOp.getBody()->getTerminator()->getOperands()) + yieldValues.push_back(mapping.lookup(v)); + for (Value nextBuffer : nextBuffers) + yieldValues.push_back(nextBuffer); + for (Value nextSlice : extractSlices) + yieldValues.push_back(nextSlice); + + for (size_t i = depArgsBeginIdx; i < nextIVIdx; ++i) { + auto arg = newForOp.getRegionIterArgs()[i]; + assert(depArgsMapping.count(arg) && "Missing loop-carried value"); + yieldValues.push_back(depArgsMapping[arg]); + } + yieldValues.push_back(nextIV); + yieldValues.push_back(pipelineIterIdx); + yieldValues.push_back(loopIterIdx); + + builder.setInsertionPointToEnd(newForOp.getBody()); + builder.create(forOp.getBody()->getTerminator()->getLoc(), + yieldValues); + return newForOp; +} + +// ref: mlir/lib/Dialect/SCF/Transforms/LoopPipelining.cpp +struct PipelinePass : public TritonGPUPipelineBase { + PipelinePass() = default; + PipelinePass(int numStages) { this->numStages = numStages; } + + void runOnOperation() override { + int numStages = this->numStages; + + if (numStages <= 1) + return; + + getOperation()->walk([&](scf::ForOp forOp) -> void { + LoopPipeliner pipeliner(forOp, numStages); + + if (pipeliner.initialize().failed()) + return; + + pipeliner.emitPrologue(); + + scf::ForOp newForOp = pipeliner.createNewForOp(); + + pipeliner.emitEpilogue(); + + // replace the original loop + for (unsigned i = 0; i < forOp->getNumResults(); ++i) + forOp->getResult(i).replaceAllUsesWith(newForOp->getResult(i)); + forOp->erase(); + }); + } +}; +} // anonymous namespace + +std::unique_ptr mlir::createTritonGPUPipelinePass(int numStages) { + return std::make_unique(numStages); +} diff --git a/lib/Dialect/TritonGPU/Transforms/Prefetch.cpp 
b/lib/Dialect/TritonGPU/Transforms/Prefetch.cpp new file mode 100644 index 000000000000..c6e27eec6c62 --- /dev/null +++ b/lib/Dialect/TritonGPU/Transforms/Prefetch.cpp @@ -0,0 +1,313 @@ +//===----------------------------------------------------------------------===// +// +// This pass tries to prefetch operands (a and b) of tt.dot. +// Those ConvertLayoutOps will be lowered to shared memory loads. +// +// For example: +// %a: tensor<128x32xf16, #enc> +// scf.for %iv = ... iter_args(%a_arg = %a, ...) { +// %d = tt.dot %a_arg, %b, %c +// ... +// scf.yield %a_next, ... +// } +// +// will be translated to +// +// %a: tensor<128x32xf16, #enc> +// %a_tmp = tensor.extract_slice %a[0, 0] [128, 16] +// %a_prefetch = triton_gpu.convert_layout %a_tmp +// scf.for %iv = ... iter_args(%a_buf = %a, ..., %a_prefetch_arg = %a_prefetch) +// { +// %x = tt.dot %a_arg, %b, %c +// %a_tmp_rem = tensor.extract_slice %a_buf[0, 16] [128, 16] +// %a_prefetch_next = triton_gpu.convert_layout %a_tmp_rem +// ... +// scf.yield %next_a, ..., %a_prefetch_next +// } +//===----------------------------------------------------------------------===// + +#include "mlir/IR/BlockAndValueMapping.h" +#include "triton/Analysis/Utility.h" +#include "triton/Dialect/TritonGPU/IR/Dialect.h" +#include "triton/Dialect/TritonGPU/Transforms/Passes.h" + +using namespace mlir; + +#define GEN_PASS_CLASSES +#include "triton/Dialect/TritonGPU/Transforms/Passes.h.inc" + +namespace { + +class Prefetcher { + /// cache the ForOp we are working on + scf::ForOp forOp; + /// cache the YieldOp of this ForOp + scf::YieldOp yieldOp; + /// + // TODO: add a hook to infer prefetchWidth + unsigned prefetchWidth = 16; + + /// dots to be prefetched + SetVector dots; + /// dot => dot operand + DenseMap dot2aLoopArg; + DenseMap dot2aHeaderDef; + DenseMap dot2bLoopArg; + DenseMap dot2bHeaderDef; + DenseMap dot2aYield; + DenseMap dot2bYield; + /// operand => defining + DenseMap operand2headPrefetch; + + LogicalResult isForOpOperand(Value v); + + Value generatePrefetch(Value v, unsigned opIdx, bool isPrologue, + Attribute dotEncoding, OpBuilder &builder, + llvm::Optional offsetK = llvm::None, + llvm::Optional shapeK = llvm::None); + +public: + Prefetcher() = delete; + + Prefetcher(scf::ForOp forOp) : forOp(forOp) { + yieldOp = cast(forOp.getBody()->getTerminator()); + } + + LogicalResult initialize(); + + void emitPrologue(); + + scf::ForOp createNewForOp(); +}; + +Value Prefetcher::generatePrefetch(Value v, unsigned opIdx, bool isPrologue, + Attribute dotEncoding, OpBuilder &builder, + llvm::Optional offsetK, + llvm::Optional shapeK) { + // opIdx: 0 => a, 1 => b + auto type = v.getType().cast(); + SmallVector shape{type.getShape().begin(), type.getShape().end()}; + SmallVector offset{0, 0}; + Type elementType = type.getElementType(); + + auto intAttr = [&](int64_t val) { return builder.getI64IntegerAttr(val); }; + + // k => (prefetchWidth, k - prefetchWidth) + int64_t kIdx = opIdx == 0 ? 1 : 0; + + offset[kIdx] = isPrologue ? 0 : prefetchWidth; + shape[kIdx] = isPrologue ? prefetchWidth : (shape[kIdx] - prefetchWidth); + + if (shapeK) + shape[kIdx] = *shapeK; + if (offsetK) + offset[kIdx] = *offsetK; + + Value newSmem = builder.create( + v.getLoc(), + // TODO: encoding? 
+ RankedTensorType::get(shape, elementType, type.getEncoding()), v, + SmallVector{intAttr(offset[0]), intAttr(offset[1])}, + SmallVector{intAttr(shape[0]), intAttr(shape[1])}, + SmallVector{intAttr(1), intAttr(1)}); + + auto dotOperandEnc = triton::gpu::DotOperandEncodingAttr::get( + builder.getContext(), opIdx, dotEncoding); + Value prefetchSlice = builder.create( + v.getLoc(), RankedTensorType::get(shape, elementType, dotOperandEnc), + newSmem); + + return prefetchSlice; +} + +LogicalResult Prefetcher::initialize() { + Block *loop = forOp.getBody(); + + SmallVector dotsInFor; + for (Operation &op : *loop) + if (auto dotOp = dyn_cast(op)) + dotsInFor.push_back(dotOp); + + if (dotsInFor.empty()) + return failure(); + + // TODO: segfault (original for still has uses) + // when used in flash attention that has 2 dots in the loop + if (dotsInFor.size() > 1) + return failure(); + + // returns source of cvt + auto getPrefetchSrc = [](Value v) -> Value { + if (auto cvt = v.getDefiningOp()) + if (isSharedEncoding(cvt.getOperand())) + return cvt.src(); + return Value(); + }; + + auto getIncomingOp = [this](Value v) -> Value { + if (auto arg = v.dyn_cast()) + if (arg.getOwner()->getParentOp() == forOp.getOperation()) + return forOp.getOpOperandForRegionIterArg(arg).get(); + return Value(); + }; + + auto getYieldOp = [this](Value v) -> Value { + auto arg = v.cast(); + unsigned yieldIdx = arg.getArgNumber() - forOp.getNumInductionVars(); + return yieldOp.getOperand(yieldIdx); + }; + + for (triton::DotOp dot : dotsInFor) { + auto kSize = dot.a().getType().cast().getShape()[1]; + // Skip prefetching if kSize is less than prefetchWidth + if (kSize < prefetchWidth) + continue; + Value aSmem = getPrefetchSrc(dot.a()); + Value bSmem = getPrefetchSrc(dot.b()); + if (aSmem && bSmem) { + Value aHeaderDef = getIncomingOp(aSmem); + Value bHeaderDef = getIncomingOp(bSmem); + // Only prefetch loop arg + if (aHeaderDef && bHeaderDef) { + dots.insert(dot); + dot2aHeaderDef[dot] = aHeaderDef; + dot2bHeaderDef[dot] = bHeaderDef; + dot2aLoopArg[dot] = aSmem; + dot2bLoopArg[dot] = bSmem; + dot2aYield[dot] = getYieldOp(aSmem); + dot2bYield[dot] = getYieldOp(bSmem); + } + } + } + + return success(); +} + +void Prefetcher::emitPrologue() { + OpBuilder builder(forOp); + + for (Value dot : dots) { + Attribute dotEncoding = + dot.getType().cast().getEncoding(); + Value aPrefetched = + generatePrefetch(dot2aHeaderDef[dot], 0, true, dotEncoding, builder); + operand2headPrefetch[dot.getDefiningOp().a()] = aPrefetched; + Value bPrefetched = + generatePrefetch(dot2bHeaderDef[dot], 1, true, dotEncoding, builder); + operand2headPrefetch[dot.getDefiningOp().b()] = bPrefetched; + } +} + +scf::ForOp Prefetcher::createNewForOp() { + OpBuilder builder(forOp); + + SmallVector loopArgs; + for (auto v : forOp.getIterOperands()) + loopArgs.push_back(v); + for (Value dot : dots) { + loopArgs.push_back( + operand2headPrefetch[dot.getDefiningOp().a()]); + loopArgs.push_back( + operand2headPrefetch[dot.getDefiningOp().b()]); + } + + auto newForOp = builder.create( + forOp.getLoc(), forOp.getLowerBound(), forOp.getUpperBound(), + forOp.getStep(), loopArgs); + + auto largestPow2 = [](int64_t n) -> int64_t { + while ((n & (n - 1)) != 0) + n = n & (n - 1); + return n; + }; + + builder.setInsertionPointToStart(newForOp.getBody()); + BlockAndValueMapping mapping; + for (const auto &arg : llvm::enumerate(forOp.getRegionIterArgs())) + mapping.map(arg.value(), newForOp.getRegionIterArgs()[arg.index()]); + mapping.map(forOp.getInductionVar(), 
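The largestPow2 lambda just defined drives how the K dimension of the dot is carved up: one prefetchWidth-wide slice is converted ahead of the first dot, and the remainder is consumed in decreasing power-of-two chunks by the while (kRem != 0) loop that follows. A standalone sketch of that arithmetic, with K = 128 as an example value:

#include <cstdint>
#include <cstdio>

// Sketch only: mirrors the lambda above (repeatedly clear the lowest set bit).
static int64_t largestPow2(int64_t n) {
  while ((n & (n - 1)) != 0)
    n = n & (n - 1);
  return n;
}

int main() {
  const int64_t K = 128, prefetchWidth = 16;
  std::printf("prefetched slice: [0, %lld)\n", (long long)prefetchWidth);
  int64_t kOff = prefetchWidth, kRem = K - prefetchWidth;
  while (kRem != 0) {
    int64_t kShape = largestPow2(kRem); // 64, then 32, then 16 for K = 128
    std::printf("remainder slice:  [%lld, %lld)\n", (long long)kOff,
                (long long)(kOff + kShape));
    kOff += kShape;
    kRem -= kShape;
  }
  return 0;
}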
newForOp.getInductionVar()); + + for (Operation &op : forOp.getBody()->without_terminator()) { + Operation *newOp = builder.clone(op, mapping); + auto dot = dyn_cast(&op); + if (dots.contains(dot)) { + Attribute dotEncoding = + dot.getType().cast().getEncoding(); + // prefetched dot + Operation *firstDot = builder.clone(*dot, mapping); + if (Value a = operand2headPrefetch.lookup(dot.a())) + firstDot->setOperand( + 0, newForOp.getRegionIterArgForOpOperand(*a.use_begin())); + if (Value b = operand2headPrefetch.lookup(dot.b())) + firstDot->setOperand( + 1, newForOp.getRegionIterArgForOpOperand(*b.use_begin())); + + // remaining part + int64_t kOff = prefetchWidth; + int64_t kRem = dot.a().getType().cast().getShape()[1] - + prefetchWidth; + Operation *prevDot = firstDot; + while (kRem != 0) { + int64_t kShape = largestPow2(kRem); + Value aRem = + generatePrefetch(mapping.lookup(dot2aLoopArg[dot]), 0, false, + dotEncoding, builder, kOff, kShape); + Value bRem = + generatePrefetch(mapping.lookup(dot2bLoopArg[dot]), 1, false, + dotEncoding, builder, kOff, kShape); + newOp = builder.clone(*dot, mapping); + newOp->setOperand(0, aRem); + newOp->setOperand(1, bRem); + newOp->setOperand(2, prevDot->getResult(0)); + prevDot = newOp; + kOff += kShape; + kRem -= kShape; + } + } + // update mapping of results + for (unsigned dstIdx : llvm::seq(unsigned(0), op.getNumResults())) + mapping.map(op.getResult(dstIdx), newOp->getResult(dstIdx)); + } + + // prefetch next iteration + SmallVector yieldValues; + for (Value v : forOp.getBody()->getTerminator()->getOperands()) + yieldValues.push_back(mapping.lookup(v)); + for (Value dot : dots) { + Attribute dotEncoding = + dot.getType().cast().getEncoding(); + yieldValues.push_back(generatePrefetch(mapping.lookup(dot2aYield[dot]), 0, + true, dotEncoding, builder)); + yieldValues.push_back(generatePrefetch(mapping.lookup(dot2bYield[dot]), 1, + true, dotEncoding, builder)); + } + // Update ops of yield + builder.create(yieldOp.getLoc(), yieldValues); + return newForOp; +} + +struct PrefetchPass : public TritonGPUPrefetchBase { + void runOnOperation() override { + getOperation()->walk([&](scf::ForOp forOp) { + Prefetcher prefetcher(forOp); + + if (prefetcher.initialize().failed()) + return; + + prefetcher.emitPrologue(); + + scf::ForOp newForOp = prefetcher.createNewForOp(); + + // replace the original loop + for (unsigned i = 0; i < forOp->getNumResults(); ++i) + forOp->getResult(i).replaceAllUsesWith(newForOp->getResult(i)); + forOp->erase(); + }); + } +}; + +} // anonymous namespace + +std::unique_ptr mlir::createTritonGPUPrefetchPass() { + return std::make_unique(); +} diff --git a/lib/Dialect/TritonGPU/Transforms/TritonGPUConversion.cpp b/lib/Dialect/TritonGPU/Transforms/TritonGPUConversion.cpp new file mode 100644 index 000000000000..37ac71099561 --- /dev/null +++ b/lib/Dialect/TritonGPU/Transforms/TritonGPUConversion.cpp @@ -0,0 +1,103 @@ +#include "triton/Dialect/TritonGPU/Transforms/TritonGPUConversion.h" +#include "mlir/IR/BlockAndValueMapping.h" +#include "triton/Dialect/Triton/IR/Dialect.h" +#include "triton/Dialect/TritonGPU/IR/Dialect.h" +#include +#include + +using namespace mlir; +using namespace mlir::triton::gpu; + +// +// TypeConverter +// +TritonGPUTypeConverter::TritonGPUTypeConverter(MLIRContext *context, + int numWarps) + : context(context), numWarps(numWarps) { + // TODO: how does MLIR pick the right conversion? 
+ addConversion([](Type type) { return type; }); + addConversion([this](RankedTensorType tensorType) -> RankedTensorType { + // types with encoding are already in the right format + // TODO: check for layout encodings specifically + if (tensorType.getEncoding()) + return tensorType; + // pessimistic values for attributes: + // - 1 element per thread + // - order = arange(rank) + ArrayRef shape = tensorType.getShape(); + int rank = shape.size(); + llvm::SmallVector order(rank); + std::iota(order.begin(), order.end(), 0); + llvm::SmallVector sizePerThread(rank, 1); + Attribute encoding = triton::gpu::BlockedEncodingAttr::get( + this->context, shape, sizePerThread, order, this->numWarps); + return RankedTensorType::get(shape, tensorType.getElementType(), encoding); + }); + + // + // Materializations + // + // This will be called when (newArgType != origArgType) + // This will create newArg, and map(origArg, newArg) + addArgumentMaterialization([&](OpBuilder &builder, + RankedTensorType tensorType, ValueRange inputs, + Location loc) { + llvm_unreachable("Argument rematerialization not implemented"); + return llvm::None; + }); + + // If the origValue still has live user(s), use this to + // convert origValue to newValue + addSourceMaterialization([&](OpBuilder &builder, RankedTensorType tensorType, + ValueRange inputs, Location loc) { + llvm_unreachable("Source rematerialization not implemented"); + return llvm::None; + }); + + // This will be called when (desiredType != newOperandType) + // where, desiredType = typeConverter->convertType(origType) + // NOTE: only for remapped values. + addTargetMaterialization([&](OpBuilder &builder, RankedTensorType tensorType, + ValueRange inputs, Location loc) { + auto cast = + builder.create(loc, tensorType, inputs); + return Optional(cast.getResult()); + // return Optional(cast.getResult(0)); + // llvm_unreachable("Not implemented"); + // return llvm::None; + }); +} + +// +// TritonGPUConversion +// +TritonGPUConversionTarget::TritonGPUConversionTarget( + MLIRContext &context, TritonGPUTypeConverter &typeConverter) + : ConversionTarget(context) { + // TODO: we should also verify ops of TritonGPUDialect + addLegalDialect(); + + // Some ops from SCF are illegal + addIllegalOp(); + + addDynamicallyLegalDialect([&](Operation *op) { + if (typeConverter.isLegal(op)) + return true; + return false; + }); + + // We have requirements for the data layouts + addDynamicallyLegalOp([](triton::DotOp dotOp) -> bool { + Attribute aEncoding = + dotOp.a().getType().cast().getEncoding(); + Attribute bEncoding = + dotOp.b().getType().cast().getEncoding(); + if (aEncoding && aEncoding.isa() && + bEncoding && bEncoding.isa()) + return true; + return false; + }); +} diff --git a/lib/Target/CMakeLists.txt b/lib/Target/CMakeLists.txt new file mode 100644 index 000000000000..9b24f0ff225b --- /dev/null +++ b/lib/Target/CMakeLists.txt @@ -0,0 +1,2 @@ +add_subdirectory(LLVMIR) +add_subdirectory(PTX) diff --git a/lib/Target/LLVMIR/CMakeLists.txt b/lib/Target/LLVMIR/CMakeLists.txt new file mode 100644 index 000000000000..73a89676a12c --- /dev/null +++ b/lib/Target/LLVMIR/CMakeLists.txt @@ -0,0 +1,12 @@ +add_mlir_translation_library(TritonLLVMIR + LLVMIRTranslation.cpp + + LINK_COMPONENTS + Core + + LINK_LIBS PUBLIC + MLIRIR + MLIRLLVMIR + MLIRSupport + MLIRTargetLLVMIRExport + ) diff --git a/lib/Target/LLVMIR/LLVMIRTranslation.cpp b/lib/Target/LLVMIR/LLVMIRTranslation.cpp new file mode 100644 index 000000000000..f193bcc1daa3 --- /dev/null +++ b/lib/Target/LLVMIR/LLVMIRTranslation.cpp @@ 
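The type converter and conversion target defined above are meant to be handed to MLIR's dialect-conversion driver; the actual wiring lives in the TritonToTritonGPU pass elsewhere in this patch, so the following is only a rough sketch, with the pattern population elided and the class names taken from TritonGPUConversion.h:

#include "mlir/Transforms/DialectConversion.h"
#include "triton/Dialect/TritonGPU/Transforms/TritonGPUConversion.h"

// Sketch only: drive a partial conversion with the converter/target above.
static mlir::LogicalResult convertToTritonGPU(mlir::ModuleOp module, int numWarps) {
  mlir::MLIRContext *context = module.getContext();
  TritonGPUTypeConverter typeConverter(context, numWarps);
  TritonGPUConversionTarget target(*context, typeConverter);
  mlir::RewritePatternSet patterns(context);
  // ... populate the op-conversion patterns that rewrite types via typeConverter ...
  return mlir::applyPartialConversion(module, target, std::move(patterns));
}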
-0,0 +1,237 @@ +#include "triton/Target/LLVMIR/LLVMIRTranslation.h" + +#include "mlir/Conversion/Passes.h" +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "mlir/ExecutionEngine/ExecutionEngine.h" +#include "mlir/ExecutionEngine/OptUtils.h" +#include "mlir/IR/Dialect.h" +#include "mlir/Pass/Pass.h" +#include "mlir/Pass/PassManager.h" +#include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h" +#include "mlir/Target/LLVMIR/Dialect/NVVM/NVVMToLLVMIRTranslation.h" +#include "mlir/Target/LLVMIR/Export.h" +#include "mlir/Target/LLVMIR/LLVMTranslationInterface.h" +#include "mlir/Transforms/Passes.h" +#include "triton/Conversion/TritonGPUToLLVM/TritonGPUToLLVMPass.h" +#include "triton/Tools/Sys/GetEnv.hpp" +#include "llvm/IR/Constants.h" +#include "llvm/IRReader/IRReader.h" +#include "llvm/Linker/Linker.h" +#include "llvm/Support/SourceMgr.h" + +namespace mlir { +namespace triton { + +// Describes NVVM Metadata. It is used to record the nvvm related meta +// information from mlir module. +struct NVVMMetadata { + int maxntidx{-1}; + bool is_kernel{}; + // Free to extend with other information. +}; + +// Add the nvvm related metadata to LLVM IR. +void amendLLVMFunc(llvm::Function *func, const NVVMMetadata &metadata) { + auto *module = func->getParent(); + auto &ctx = func->getContext(); + + if (metadata.maxntidx > 0) { + auto i32_ty = llvm::IntegerType::get(ctx, 32); + auto warps = + llvm::ConstantInt::get(i32_ty, llvm::APInt(32, metadata.maxntidx)); + + llvm::Metadata *md_args[] = {llvm::ValueAsMetadata::get(func), + llvm::MDString::get(ctx, "maxntidx"), + llvm::ValueAsMetadata::get(warps)}; + + module->getOrInsertNamedMetadata("nvvm.annotations") + ->addOperand(llvm::MDNode::get(ctx, md_args)); + } + + if (metadata.is_kernel) { + llvm::Metadata *md_args[] = { + llvm::ValueAsMetadata::get(func), llvm::MDString::get(ctx, "kernel"), + llvm::ValueAsMetadata::get( + llvm::ConstantInt::get(llvm::Type::getInt32Ty(ctx), 1))}; + module->getOrInsertNamedMetadata("nvvm.annotations") + ->addOperand(llvm::MDNode::get(ctx, md_args)); + } +} + +void extractNVVMMetadata(mlir::ModuleOp module, + llvm::DenseMap *dic) { + for (auto op : module.getOps()) { + NVVMMetadata meta; + + bool hasMetadata{}; + + // maxntid + if (op->hasAttr("nvvm.maxntid")) { + auto attr = op->getAttr("nvvm.maxntid"); + meta.maxntidx = attr.dyn_cast().getInt(); + hasMetadata = true; + } + + // kernel + if (op->hasAttr("nvvm.kernel")) { + meta.is_kernel = true; + hasMetadata = true; + } + + if (hasMetadata) + dic->try_emplace(op.getNameAttr().strref(), std::move(meta)); + } +} + +std::unique_ptr +translateLLVMToLLVMIR(llvm::LLVMContext *llvmContext, mlir::ModuleOp module) { + auto context = module->getContext(); + DialectRegistry registry; + mlir::registerLLVMDialectTranslation(registry); + mlir::registerNVVMDialectTranslation(registry); + context->appendDialectRegistry(registry); + + llvm::DenseMap nvvmMetadata; + extractNVVMMetadata(module, &nvvmMetadata); + + auto llvmModule = mlir::translateModuleToLLVMIR(module, *llvmContext); + if (!llvmModule) { + llvm::errs() << "Failed to emit LLVM IR\n"; + return nullptr; + } + + auto optPipeline = mlir::makeOptimizingTransformer( + /*optLevel=*/3, /*sizeLevel=*/0, + /*targetMachine=*/nullptr); + + if (auto err = optPipeline(llvmModule.get())) { + llvm::errs() << "Failed to optimize LLVM IR " << err << "\n"; + return nullptr; + } + + for (auto &func : llvmModule->functions()) { + auto it = nvvmMetadata.find(func.getName()); + if (it != nvvmMetadata.end()) + amendLLVMFunc(&func, 
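For intuition, each operand that amendLLVMFunc above appends to nvvm.annotations is a (function, key, i32) triple. Assuming a kernel launched with 4 warps (128 threads per CTA), and written as if added to this same file (the function name is a placeholder), the effect is roughly:

// Sketch only: the textual LLVM IR produced for one function looks roughly like
//
//   !nvvm.annotations = !{!0, !1}
//   !0 = !{void ()* @my_kernel, !"maxntidx", i32 128}
//   !1 = !{void ()* @my_kernel, !"kernel", i32 1}
//
// where the two values come from the "nvvm.maxntid" and "nvvm.kernel"
// attributes read back by extractNVVMMetadata.
static void markAsKernel(llvm::Function *kernelFn) {
  NVVMMetadata meta;
  meta.maxntidx = 128; // 4 warps * 32 threads
  meta.is_kernel = true;
  amendLLVMFunc(kernelFn, meta);
}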
it->second); + } + + return llvmModule; +} + +std::unique_ptr +translateTritonGPUToLLVMIR(llvm::LLVMContext *llvmContext, + mlir::ModuleOp module, int computeCapability) { + mlir::PassManager pm(module->getContext()); + applyPassManagerCLOptions(pm); + auto printingFlags = mlir::OpPrintingFlags(); + printingFlags.elideLargeElementsAttrs(16); + pm.enableIRPrinting( + /*shouldPrintBeforePass=*/nullptr, + /*shouldPrintAfterPass=*/ + [](mlir::Pass *pass, mlir::Operation *) { + return ::triton::tools::getBoolEnv("MLIR_ENABLE_DUMP"); + }, + /*printModuleScope=*/false, + /*printAfterOnlyOnChange=*/true, + /*printAfterOnlyOnFailure*/ false, llvm::dbgs(), printingFlags); + + pm.addPass(createConvertTritonGPUToLLVMPass(computeCapability)); + // Canonicalize to eliminate the remaining UnrealizedConversionCastOp + pm.addPass(mlir::createCanonicalizerPass()); + pm.addPass(mlir::createCSEPass()); // Simplify the IR to improve readability. + pm.addPass(mlir::createSymbolDCEPass()); + pm.addPass(mlir::createCanonicalizerPass()); + + if (failed(pm.run(module))) { + llvm::errs() << "Pass execution failed"; + return nullptr; + } + + std::map externLibs; + SmallVector funcs; + module.walk([&](LLVM::LLVMFuncOp func) { + if (func.isExternal()) + funcs.push_back(func); + }); + + for (auto &func : funcs) { + if (func.getOperation()->hasAttr("libname")) { + auto name = + func.getOperation()->getAttr("libname").dyn_cast(); + auto path = + func.getOperation()->getAttr("libpath").dyn_cast(); + if (name) { + std::string lib_name = name.str(); + externLibs[lib_name] = path.str(); + } + } + } + + if (module.getOperation()->hasAttr("triton_gpu.externs")) { + auto dict = module.getOperation() + ->getAttr("triton_gpu.externs") + .dyn_cast(); + for (auto &attr : dict) { + externLibs[attr.getName().strref().trim().str()] = + attr.getValue().dyn_cast().strref().trim().str(); + } + } + + auto llvmir = translateLLVMToLLVMIR(llvmContext, module); + if (!llvmir) { + llvm::errs() << "Translate to LLVM IR failed"; + return nullptr; + } + + llvm::SMDiagnostic err; + for (auto &lib : externLibs) { + if (linkExternLib(*llvmir, lib.second)) + return nullptr; + } + + return llvmir; +} + +void addExternalLibs(mlir::ModuleOp &module, + const std::vector &names, + const std::vector &paths) { + if (names.empty() || names.size() != paths.size()) + return; + + llvm::SmallVector attrs; + + for (size_t i = 0; i < names.size(); ++i) { + auto name = StringAttr::get(module->getContext(), names[i]); + auto path = StringAttr::get(module->getContext(), paths[i]); + NamedAttribute attr(name, path); + attrs.push_back(attr); + } + + DictionaryAttr dict = DictionaryAttr::get(module->getContext(), attrs); + module.getOperation()->setAttr("triton_gpu.externs", dict); +} + +bool linkExternLib(llvm::Module &module, llvm::StringRef path) { + llvm::SMDiagnostic err; + auto &ctx = module.getContext(); + + auto extMod = llvm::parseIRFile(path, err, ctx); + if (!extMod) { + llvm::errs() << "Failed to load " << path; + return true; + } + + extMod->setTargetTriple(module.getTargetTriple()); + extMod->setDataLayout(module.getDataLayout()); + + if (llvm::Linker::linkModules(module, std::move(extMod), + llvm::Linker::Flags::LinkOnlyNeeded)) { + llvm::errs() << "Failed to link " << path; + return true; + } + + return false; +} + +} // namespace triton +} // namespace mlir diff --git a/lib/Target/PTX/CMakeLists.txt b/lib/Target/PTX/CMakeLists.txt new file mode 100644 index 000000000000..69aa5710cdd1 --- /dev/null +++ b/lib/Target/PTX/CMakeLists.txt @@ -0,0 +1,9 @@ 
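A short sketch of how the hooks above fit together; the library name and path are placeholders rather than values hard-coded by this patch, and the helper is written as if it lived in the same mlir::triton namespace:

// Sketch only: record external bitcode libraries on the MLIR module, then let
// translateTritonGPUToLLVMIR read them back from the "triton_gpu.externs"
// attribute and link each one with linkExternLib after translation.
std::unique_ptr<llvm::Module>
lowerWithLibdevice(mlir::ModuleOp module, llvm::LLVMContext &llvmContext,
                   int computeCapability) {
  addExternalLibs(module, /*names=*/{"libdevice"},
                  /*paths=*/{"/path/to/libdevice.10.bc"}); // placeholder path
  return translateTritonGPUToLLVMIR(&llvmContext, module, computeCapability);
}

Setting the MLIR_ENABLE_DUMP environment variable to a truthy value makes the pass pipeline above print the IR after each pass, via the enableIRPrinting hook.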
+add_mlir_translation_library(TritonPTX + PTXTranslation.cpp + + LINK_COMPONENTS + Core + + LINK_LIBS PUBLIC + TritonLLVMIR + ) diff --git a/lib/Target/PTX/PTXTranslation.cpp b/lib/Target/PTX/PTXTranslation.cpp new file mode 100644 index 000000000000..fae3b5c33a7a --- /dev/null +++ b/lib/Target/PTX/PTXTranslation.cpp @@ -0,0 +1,144 @@ +#include "triton/Target/PTX/PTXTranslation.h" +#include "triton/Target/LLVMIR/LLVMIRTranslation.h" + +#include "llvm/IR/IRBuilder.h" +#include "llvm/IR/LegacyPassManager.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/Verifier.h" +#include "llvm/MC/TargetRegistry.h" +#include "llvm/Support/TargetSelect.h" +#include "llvm/Target/TargetMachine.h" +#include + +namespace triton { + +static void initLLVM() { + LLVMInitializeNVPTXTargetInfo(); + LLVMInitializeNVPTXTarget(); + LLVMInitializeNVPTXTargetMC(); + LLVMInitializeNVPTXAsmPrinter(); +} + +static bool findAndReplace(std::string &str, const std::string &begin, + const std::string &end, const std::string &target) { + size_t startReplace = str.find(begin); + if (startReplace == std::string::npos) + return false; + size_t endReplace = str.find(end, startReplace); + if (endReplace == std::string::npos) + return false; + str.replace(startReplace, endReplace + 1 - startReplace, target); + return true; +} + +static void linkExternal(llvm::Module &module) { + bool hasExternal = false; + for (auto &func : module) { + if (func.hasExternalLinkage()) { + hasExternal = true; + break; + } + } + + if (hasExternal) { + namespace fs = std::filesystem; + // [triton root dir]/python/triton/language/libdevice.10.bc + static const fs::path libdevice = fs::path(__FILE__) + .parent_path() + .parent_path() + .parent_path() + .parent_path() / + "python" / "triton" / "language" / + "libdevice.10.bc"; + if (mlir::triton::linkExternLib(module, libdevice.string())) + llvm::errs() << "link failed for: " << libdevice.string(); + + // please check https://llvm.org/docs/NVPTXUsage.html#reflection-parameters + // this will enable fast math path in libdevice + // for example, when enable nvvm-reflect-ftz, sqrt.approx.f32 will change to + // sqrt.approx.ftz.f32 + auto &ctx = module.getContext(); + llvm::Type *I32 = llvm::Type::getInt32Ty(ctx); + llvm::Metadata *mdFour = + llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(I32, 4)); + llvm::Metadata *mdName = llvm::MDString::get(ctx, "nvvm-reflect-ftz"); + llvm::Metadata *mdOne = + llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(I32, 1)); + llvm::MDNode *reflect = llvm::MDNode::get(ctx, {mdFour, mdName, mdOne}); + module.addModuleFlag(reflect); + } +} + +std::string translateLLVMIRToPTX(llvm::Module &module, int cc, int version) { + linkExternal(module); + + // LLVM version in use may not officially support target hardware + int maxNNVMCC = 75; + // options + auto options = llvm::cl::getRegisteredOptions(); + auto *shortPtr = + static_cast *>(options["nvptx-short-ptr"]); + assert(shortPtr); + shortPtr->setValue(true); + // compute capability + std::string sm = "sm_" + std::to_string(cc); + // max PTX version + int ptxMajor = version / 10; + int ptxMinor = version % 10; + // create + llvm::SmallVector buffer; + std::string triple = "nvptx64-nvidia-cuda"; + std::string proc = "sm_" + std::to_string(std::min(cc, maxNNVMCC)); + std::string layout = ""; + std::string features = ""; + // std::string features = "+ptx" + std::to_string(std::min(ptx, + // max_nvvm_ptx)); + initLLVM(); + // verify and store llvm + llvm::legacy::PassManager pm; + pm.add(llvm::createVerifierPass()); + 
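findAndReplace above replaces everything from the first occurrence of `begin` up to and including the following `end`; the post-processing at the end of translateLLVMIRToPTX (below) uses it to rewrite the .version and .target header lines that LLVM emits. A self-contained illustration, written as if appended to this file, with example version/target values:

#include <cassert>
#include <string>

// Sketch only: exercise findAndReplace the same way the post-processing does.
int main() {
  std::string ptx = "//\n.version 7.4\n.target sm_86\n.address_size 64\n";
  // Request PTX ISA 6.3 (version == 63 -> major 6, minor 3) for sm_80:
  findAndReplace(ptx, ".version", "\n", ".version 6.3\n");
  findAndReplace(ptx, ".target", "\n", ".target sm_80\n");
  assert(ptx == "//\n.version 6.3\n.target sm_80\n.address_size 64\n");
  return 0;
}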
pm.run(module); + // module->print(llvm::outs(), nullptr); + + // create machine + module.setTargetTriple(triple); + std::string error; + auto target = + llvm::TargetRegistry::lookupTarget(module.getTargetTriple(), error); + llvm::TargetOptions opt; + opt.AllowFPOpFusion = llvm::FPOpFusion::Fast; + opt.UnsafeFPMath = false; + opt.NoInfsFPMath = false; + opt.NoNaNsFPMath = true; + llvm::TargetMachine *machine = target->createTargetMachine( + module.getTargetTriple(), proc, features, opt, llvm::Reloc::PIC_, + llvm::None, llvm::CodeGenOpt::Aggressive); + // set data layout + if (layout.empty()) + module.setDataLayout(machine->createDataLayout()); + else + module.setDataLayout(layout); + // emit machine code + for (llvm::Function &f : module.functions()) + f.addFnAttr(llvm::Attribute::AlwaysInline); + llvm::legacy::PassManager pass; + llvm::raw_svector_ostream stream(buffer); + // emit + machine->addPassesToEmitFile(pass, stream, nullptr, + llvm::CodeGenFileType::CGFT_AssemblyFile); + pass.run(module); + + // post-process + std::string result(buffer.begin(), buffer.end()); + findAndReplace(result, ".version", "\n", + ".version " + std::to_string(ptxMajor) + "." + + std::to_string(ptxMinor) + "\n"); + findAndReplace(result, ".target", "\n", ".target " + sm + "\n"); + while (findAndReplace(result, "\t// begin inline asm", "\n", "")) + ; + while (findAndReplace(result, "\t// end inline asm", "\n", "")) + ; + return result; +} + +} // namespace triton diff --git a/lib/codegen/analysis/align.cc b/lib/codegen/analysis/align.cc deleted file mode 100644 index a4a066928208..000000000000 --- a/lib/codegen/analysis/align.cc +++ /dev/null @@ -1,634 +0,0 @@ -#include "triton/codegen/analysis/align.h" -#include "triton/ir/utils.h" -#include "triton/ir/module.h" -#include "triton/ir/function.h" -#include "triton/ir/basic_block.h" -#include "triton/ir/instructions.h" -#include "triton/ir/type.h" -#include - -namespace triton { -namespace codegen{ -namespace analysis{ - - -// Function for extended Euclidean Algorithm -int gcd_impl(int a, int b, int *x, int *y) -{ - // Base Case - if (a == 0) - { - *x = 0; - *y = 1; - return b; - } - - int x1, y1; // To store results of recursive call - int gcd = gcd_impl(b%a, a, &x1, &y1); - - // Update x and y using results of - // recursive call - *x = y1 - (b/a) * x1; - *y = x1; - - return gcd; -} - -int gcd(int a, int b) { - int x, y; - return gcd_impl(a, b, &x, &y); -} - - -inline int lcm(int a, int b) { - return (a * b) / gcd(a, b); -} - -template -inline T add_to_cache(ir::value *i, T value, std::map &map) { - return map[i] = value; -} - -/* - * is constant - */ - -std::vector align::get_shapes(ir::value *v) { - ir::type *ty = v->get_type(); - if(ty->is_block_ty()) - return ty->get_block_shapes(); - else - return {1}; -} - -std::vector align::populate_is_constant_phi(ir::phi_node* x) { - auto shapes = get_shapes(x); - std::vector result(shapes.size(), cst_info{1, 0}); - for(unsigned n = 0; n < x->get_num_incoming(); n++){ - ir::value* inc = x->get_incoming_value(n); - auto it = is_constant_.find(inc); - if(it != is_constant_.end()) - result = it->second; - } - return add_to_cache(x, result, is_constant_); - // recurse - for(unsigned n = 0; n < x->get_num_incoming(); n++){ - ir::value* inc = x->get_incoming_value(n); - auto cst = populate_is_constant(inc); - for(size_t d = 0; d < cst.size(); d++) - result[d].num_cst = std::min(result[d].num_cst, cst[d].num_cst); - } - return add_to_cache(x, result, is_constant_); -} - -std::vector 
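The gcd_impl routine at the top of this (now removed) legacy align pass is the extended Euclidean algorithm, so besides the gcd it also returns Bezout coefficients x, y with a*x + b*y == gcd(a, b); the same kind of divisibility reasoning is carried over into the new lib/Analysis/AxisInfo.cpp. A tiny check, written as if appended to this file:

#include <cassert>

// Sketch only: gcd_impl is the function defined above in this file.
int main() {
  int x, y;
  int g = gcd_impl(12, 18, &x, &y);
  assert(g == 6);
  assert(12 * x + 18 * y == 6); // here x == -1, y == 1
  return 0;
}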
align::populate_is_constant_splat(ir::splat_inst* x) { - auto shapes = get_shapes(x); - ir::value* op = x->get_operand(0); - std::vector result; - auto op_cst = populate_is_constant(op); - for(auto d: shapes) - result.push_back(cst_info{d, op_cst[0].value}); - return add_to_cache(x, result, is_constant_); -} - -std::vector align::populate_is_constant_reshape(ir::reshape_inst* x) { - auto x_shapes = get_shapes(x); - std::vector result; - ir::value *op = x->get_operand(0); - auto op_shapes = op->get_type()->get_block_shapes(); - auto op_cst = populate_is_constant(op); - unsigned current = 0; - bool is_skewed = false; - for(size_t d = 0; d < x_shapes.size(); d ++){ - cst_info ax ; - if(x_shapes[d] == 1) - ax = {1, op_cst[current].value}; - else if(!is_skewed - && x_shapes[d] == op_shapes[current]) - ax = {x_shapes[d], op_cst[current++].value}; - else { - is_skewed = true; - ax = {x_shapes[d], 0}; - } - result.push_back(ax); - } - return add_to_cache(x, result, is_constant_); -} - -std::vector align::populate_is_constant_dequantize(ir::dequantize_inst* x) { - auto x_shapes = get_shapes(x); - std::vector result; - ir::value *op = x->get_operand(0); - auto op_shapes = op->get_type()->get_block_shapes(); - auto op_cst = populate_is_constant(op); - for(size_t d = 0; d < x_shapes.size(); d++) { - result.push_back(op_cst[d]); - } - return add_to_cache(x, result, is_constant_); -} - -std::vector align::populate_is_constant_broadcast(ir::broadcast_inst* x) { - auto x_shapes = get_shapes(x); - std::vector result; - ir::value *op = x->get_operand(0); - auto op_shapes = op->get_type()->get_block_shapes(); - auto op_cst = populate_is_constant(op); - for(size_t d = 0; d < x_shapes.size(); d++) - if(op_shapes[d] == 1) - result.push_back(cst_info{x_shapes[d], op_cst[d].value}); - else - result.push_back(op_cst[d]); - return add_to_cache(x, result, is_constant_); -} - -std::vector align::populate_is_constant_cmp(ir::cmp_inst* x) { - auto x_shapes = get_shapes(x); - std::vector result; - ir::value* lhs_op = x->get_operand(0); - ir::value* rhs_op = x->get_operand(1); - auto lhs = populate_is_constant(lhs_op); - auto rhs = populate_is_constant(rhs_op); - auto lhs_max_contiguous = populate_max_contiguous(lhs_op); - auto rhs_max_contiguous = populate_max_contiguous(rhs_op); - auto lhs_multiple_of = populate_starting_multiple(lhs_op); - auto rhs_multiple_of = populate_starting_multiple(rhs_op); - for(size_t d = 0; d < x_shapes.size(); d++) { - cst_info ax = {1, 0}; - // Examples: - // 16 17 18 ... 32 < 24 24 24 ... 24 => equal in groups of 8 - // 16 17 18 ... 32 < 20 20 20 ... 20 => equal in groups of 4 - // 16 17 18 ... 32 < 16 16 16 ... 
16 => equal in groups of 16 - // - // if LHS is a range of N continuous (or equal) elements that starts at M, - // and RHS is a set of N constants that start at K - // then the result in constant in groups of gcd(M, K) - if(rhs[d].num_cst % lhs_max_contiguous[d] == 0 || - rhs[d].num_cst % lhs[d].num_cst == 0) - ax.num_cst = gcd(lhs_multiple_of[d], rhs_multiple_of[d]); - result.push_back(ax); - } - return add_to_cache(x, result, is_constant_); -} - - -std::vector align::populate_is_constant_binop(ir::binary_operator* x) { - auto x_shapes = get_shapes(x); - std::vector result; - ir::value* lhs_op = x->get_operand(0); - ir::value* rhs_op = x->get_operand(1); - auto lhs = populate_is_constant(lhs_op); - auto rhs = populate_is_constant(rhs_op); - auto lhs_max_contiguous = populate_max_contiguous(lhs_op); - auto rhs_max_contiguous = populate_max_contiguous(rhs_op); - auto lhs_multiple_of = populate_starting_multiple(lhs_op); - auto rhs_multiple_of = populate_starting_multiple(rhs_op); - for(size_t d = 0; d < x_shapes.size(); d++) { - cst_info ax; - if(lhs[d].num_cst==0 && rhs[d].value && x->is_int_div()){ - unsigned num_constants = gcd(lhs_max_contiguous[d], rhs[d].value); - ax = {num_constants, 0}; - } - else - ax = {std::min(lhs[d].num_cst, rhs[d].num_cst), 0}; - result.push_back(ax); - } - return add_to_cache(x, result, is_constant_); -} - -std::vector align::populate_is_constant_gep(ir::getelementptr_inst* x) { - auto x_shapes = get_shapes(x); - ir::value* lhs_op = x->get_operand(0); - ir::value* rhs_op = x->get_operand(1); - auto lhs = populate_is_constant(lhs_op); - auto rhs = populate_is_constant(rhs_op); - std::vector result; - for(size_t d = 0; d < x_shapes.size(); d++) - result.push_back({std::min(lhs[d].num_cst, rhs[d].num_cst), 0}); - return add_to_cache(x, result, is_constant_); -} - -std::vector align::populate_is_constant_default(ir::value *v) { - auto shapes = get_shapes(v); - std::vector result(shapes.size(), {1, 0}); - return add_to_cache(v, result, is_constant_); -} - -std::vector align::populate_is_constant(ir::value *v) { - if(is_constant_.find(v) != is_constant_.end()) - return is_constant_.at(v); - if(auto *x = dynamic_cast(v)) - return add_to_cache(v, {cst_info{true, std::min(x->get_value(), 128)}}, is_constant_); - if(auto *x = dynamic_cast(v)) - return populate_is_constant_phi(x); - if(auto *x = dynamic_cast(v)) - return populate_is_constant_splat(x); - if(auto *x = dynamic_cast(v)) - return populate_is_constant_reshape(x); - if(auto *x = dynamic_cast(v)) - return populate_is_constant_dequantize(x); - if(auto *x = dynamic_cast(v)) - return populate_is_constant_broadcast(x); - if(auto *x = dynamic_cast(v)) - return populate_is_constant_binop(x); - if(auto *x = dynamic_cast(v)) - return populate_is_constant_cmp(x); - if(auto *x = dynamic_cast(v)) - return populate_is_constant_gep(x); - return populate_is_constant_default(v); -} - - -/* - * max contiguous - */ - -std::vector align::populate_max_contiguous_phi(ir::phi_node* x) { - auto shapes = get_shapes(x); - std::vector result(shapes.size(), 1); - for(unsigned n = 0; n < x->get_num_incoming(); n++){ - ir::value* inc = x->get_incoming_value(n); - auto it = max_contiguous_.find(inc); - if(it != max_contiguous_.end()) - result = it->second; - } - add_to_cache(x, result, max_contiguous_); - // recurse - for(unsigned n = 0; n < x->get_num_incoming(); n++){ - ir::value* inc = x->get_incoming_value(n); - auto contiguous = populate_max_contiguous(inc); - for(size_t d = 0; d < result.size(); d++) - result[d] = std::min(result[d], 
contiguous[d]); - } - return add_to_cache(x, result, max_contiguous_); - -} - -std::vector align::populate_max_contiguous_splat(ir::splat_inst* x) { - auto x_shapes = get_shapes(x); - std::vector result; - for(size_t d = 0; d < x_shapes.size(); d++) - result.push_back({1}); - return add_to_cache(x, result, max_contiguous_); -} - -std::vector align::populate_max_contiguous_reshape(ir::reshape_inst* x) { - auto shapes = get_shapes(x); - std::vector result; - ir::value *op = x->get_operand(0); - auto op_shapes = op->get_type()->get_block_shapes(); - auto op_mc = populate_max_contiguous(op); - unsigned current = 0; - bool is_skewed = false; - for(size_t d = 0; d < shapes.size(); d ++){ - if(shapes[d] == 1) - result.push_back(1); - else if(!is_skewed - && shapes[d] == op_shapes[current]) - result.push_back(op_mc[current++]); - else { - is_skewed = true; - result.push_back(1); - } - } - return add_to_cache(x, result, max_contiguous_); -} - -std::vector align::populate_max_contiguous_dequantize(ir::dequantize_inst* x) { - auto shapes = get_shapes(x); - std::vector result; - ir::value *op = x->get_operand(0); - auto ret_last_dim = (x->get_type()->get_block_shapes()).back(); - auto op_last_dim = (op->get_type()->get_block_shapes()).back(); - auto op_mc = populate_max_contiguous(op); - for(size_t d = 0; d < shapes.size(); d++) { - unsigned factor = 1; - if (d == shapes.size() - 1) { - factor = ret_last_dim / op_last_dim; - } - result.push_back(factor * op_mc[d]); - } - return add_to_cache(x, result, max_contiguous_); -} - -std::vector align::populate_max_contiguous_broadcast(ir::broadcast_inst* x) { - auto shapes = get_shapes(x); - std::vector result; - ir::value *op = x->get_operand(0); - auto op_shapes = op->get_type()->get_block_shapes(); - auto op_mc = populate_max_contiguous(op); - for(size_t d = 0; d < shapes.size(); d++) - if(op_shapes[d] == 1) - result.push_back(1); - else - result.push_back(op_mc[d]); - return add_to_cache(x, result, max_contiguous_); -} - -std::vector align::populate_max_contiguous_binop(ir::binary_operator* x) { - auto shapes = get_shapes(x); - ir::value* lhs = x->get_operand(0); - ir::value* rhs = x->get_operand(1); - auto lhs_max_contiguous = populate_max_contiguous(lhs); - auto rhs_max_contiguous = populate_max_contiguous(rhs); - auto lhs_cst_info = populate_is_constant(lhs); - auto rhs_cst_info = populate_is_constant(rhs); - auto lhs_starting_multiple = populate_starting_multiple(lhs); - auto rhs_starting_multiple = populate_starting_multiple(rhs); - std::vector result; - for(size_t d = 0; d < shapes.size(); d++){ - unsigned value = 1; - if(x->is_int_rem() && rhs_starting_multiple[d] > 0){ - value = std::min(lhs_max_contiguous[d], rhs_starting_multiple[d]); - } - if(x->is_int_mult()){ - unsigned lvalue = 1, rvalue = 1; - if(rhs_cst_info[d].value == 1) - lvalue = lhs_max_contiguous[d]; - if(lhs_cst_info[d].value == 1) - rvalue = rhs_max_contiguous[d]; - value = std::max(lvalue, rvalue); - } - if(x->is_int_add_sub()){ - unsigned lvalue = 1, rvalue = 1; - lvalue = gcd(rhs_max_contiguous[d], lhs_cst_info[d].num_cst); - rvalue = gcd(lhs_max_contiguous[d], rhs_cst_info[d].num_cst); - value = std::max(lvalue, rvalue); - } - result.push_back(value); - } - return add_to_cache(x, result, max_contiguous_); -} - -std::vector align::populate_max_contiguous_gep(ir::getelementptr_inst* x) { - auto shapes = get_shapes(x); - ir::value* lhs = x->get_operand(0); - ir::value* rhs = x->get_operand(1); - auto lhs_max_contiguous = populate_max_contiguous(lhs); - auto rhs_max_contiguous = 
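The is_int_add_sub() branch of populate_max_contiguous_binop above is what keeps accesses vectorizable when a block-wide constant is added to a contiguous range. In numbers (sketch only, using std::gcd in place of the file-local gcd):

#include <algorithm>
#include <cassert>
#include <numeric>

int main() {
  // lhs: a contiguous range, e.g. 16 consecutive offsets (contiguity 16,
  // constant in groups of 1); rhs: a scalar splatted over the block
  // (contiguity 1, constant in groups of 16).
  unsigned lhs_max_contig = 16, lhs_num_cst = 1;
  unsigned rhs_max_contig = 1, rhs_num_cst = 16;
  unsigned lvalue = std::gcd(rhs_max_contig, lhs_num_cst); // 1
  unsigned rvalue = std::gcd(lhs_max_contig, rhs_num_cst); // 16
  assert(std::max(lvalue, rvalue) == 16); // the sum stays 16-contiguous
  return 0;
}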
populate_max_contiguous(rhs); - auto lhs_cst_info = populate_is_constant(lhs); - auto rhs_cst_info = populate_is_constant(rhs); - std::vector result(shapes.size(), 1); - for(size_t d = 0; d < shapes.size(); d++){ - unsigned lvalue = 1, rvalue = 1; - if(lhs_cst_info[d].num_cst) - lvalue = rhs_max_contiguous[d]; - if(rhs_cst_info[d].num_cst) - rvalue = lhs_max_contiguous[d]; - result[d] = std::max(lvalue, rvalue); - } - return add_to_cache(x, result, max_contiguous_); -} - -std::vector align::populate_max_contiguous_default(ir::value* v) { - if(!v->get_type()->is_block_ty()) - return add_to_cache(v, {1}, max_contiguous_); - auto shapes = v->get_type()->get_block_shapes(); - if(dynamic_cast(v)) - return add_to_cache(v, {shapes[0]}, max_contiguous_); - return add_to_cache(v, std::vector(shapes.size(), 1), max_contiguous_); -} - -std::vector align::populate_max_contiguous_cast(ir::cast_inst* v){ - auto result = populate_max_contiguous(v->get_operand(0)); - return add_to_cache(v, result, max_contiguous_); -} - -std::vector align::populate_max_contiguous(ir::value *v){ - if(max_contiguous_.find(v) != max_contiguous_.end()) - return max_contiguous_.at(v); - if(auto *x = dynamic_cast(v)){ - std::vector max_contiguous = x->get_metadata(ir::metadata::max_contiguous); - if(!max_contiguous.empty()) - return add_to_cache(x, max_contiguous, max_contiguous_); - } - if(auto *x = dynamic_cast(v)) - return populate_max_contiguous_cast(x); - if(auto *x = dynamic_cast(v)) - return populate_max_contiguous_splat(x); - if(auto *x = dynamic_cast(v)) - return populate_max_contiguous_reshape(x); - if(auto *x = dynamic_cast(v)) - return populate_max_contiguous_dequantize(x); - if(auto *x = dynamic_cast(v)) - return populate_max_contiguous_broadcast(x); - if(auto *x = dynamic_cast(v)) - return populate_max_contiguous_binop(x); - if(auto *x = dynamic_cast(v)) - return populate_max_contiguous_gep(x); - if(auto *x = dynamic_cast(v)) - return populate_max_contiguous_phi(x); - return populate_max_contiguous_default(v); -} - - -/* - * starting multiple - */ - -std::vector align::populate_starting_multiple_splat(ir::splat_inst* x){ - auto shapes = get_shapes(x); - auto op = populate_starting_multiple(x->get_operand(0)); - std::vector result(shapes.size(), op[0]); - return add_to_cache(x, result, starting_multiple_); -} - -std::vector align::populate_starting_multiple_reshape(ir::reshape_inst* x){ - auto op = populate_starting_multiple(x->get_operand(0)); - auto op_shapes = get_shapes(x->get_operand(0)); - auto shapes = get_shapes(x); - std::vector result(shapes.size(), 1); - unsigned current = 0; - bool is_skewed = false; - for(size_t d = 0; d < shapes.size(); d ++){ - if(shapes[d] == 1) - result[d] = 1; - else if(!is_skewed - && shapes[d] == op_shapes[current]) - result[d] = op[current++]; - else { - is_skewed = true; - result[d] = 1; - } - } - return add_to_cache(x, result, starting_multiple_); -} - -std::vector align::populate_starting_multiple_dequantize(ir::dequantize_inst* x){ - auto shapes = get_shapes(x); - std::vector result; - ir::value *op = x->get_operand(0); - auto ret_last_dim = (x->get_type()->get_block_shapes()).back(); - auto op_last_dim = (op->get_type()->get_block_shapes()).back(); - auto op_multiple = populate_starting_multiple(op); - for(size_t d = 0; d < shapes.size(); d++) { - unsigned factor = 1; - if (d == shapes.size() - 1) { - factor = ret_last_dim / op_last_dim; - } - result.push_back(factor * op_multiple[d]); - } - return add_to_cache(x, result, starting_multiple_); -} - -std::vector 
align::populate_starting_multiple_broadcast(ir::broadcast_inst* x){ - auto result = populate_starting_multiple(x->get_operand(0)); - return add_to_cache(x, result, starting_multiple_); -} - -std::vector align::populate_starting_multiple_binop(ir::binary_operator* x){ - auto lhs = populate_starting_multiple(x->get_operand(0)); - auto rhs = populate_starting_multiple(x->get_operand(1)); - std::vector result(lhs.size(), 1); - for(size_t d = 0; d < lhs.size(); d++){ - if(x->is_int_mult()) - result[d] = lhs[d] * rhs[d]; - if(x->is_int_add_sub()) - result[d] = gcd(lhs[d], rhs[d]); - if(x->is_int_div()) - result[d] = (lhs[d] == (1 << 31)) ? 1 << 31 : 1; - if(x->is_int_rem() && rhs[d] > 1){ - result[d] = gcd(lhs[d], rhs[d]); - } - if(x->is_shl()) - result[d] = lhs[d] << rhs[d]; - if(x->is_shr()) - result[d] = std::max(lhs[d] >> rhs[d], 1); - } - return add_to_cache(x, result, starting_multiple_); -} - -std::vector align::populate_starting_multiple_gep(ir::getelementptr_inst* x){ - auto lhs = populate_starting_multiple(x->get_operand(0)); - auto rhs = populate_starting_multiple(x->get_operand(1)); - std::vector result(lhs.size(), 1); - for(size_t d = 0; d < lhs.size(); d++){ - result[d] = gcd(lhs[d], rhs[d]); -// std::cout << "starting multiple: " << x->get_name() << " " << d << " " << result[d] << std::endl; - } - return add_to_cache(x, result, starting_multiple_); -} - -std::vector align::populate_starting_multiple_phi(ir::phi_node* x){ - auto shape = get_shapes(x); - std::vector result(shape.size(), 1); - for(unsigned n = 0; n < x->get_num_incoming(); n++){ - ir::value* inc = x->get_incoming_value(n); - if(starting_multiple_.find(inc) != starting_multiple_.end()) - result = starting_multiple_.at(inc); - } - add_to_cache(x, result, starting_multiple_); - // recurse - for(unsigned n = 0; n < x->get_num_incoming(); n++){ - ir::value* inc = x->get_incoming_value(n); - auto sm = populate_starting_multiple(inc); - for(size_t d = 0; d < result.size(); d++) - result[d] = gcd(result[d], sm[d]); - } - return add_to_cache(x, result, starting_multiple_); -} - - -std::vector align::populate_starting_multiple_cast(ir::cast_inst* x){ - auto result = populate_starting_multiple(x->get_operand(0)); - return add_to_cache(x, result, starting_multiple_); -} - -std::vector align::populate_starting_multiple_default(ir::value* v) { - ir::type* ty = v->get_type(); - if(ty->is_block_ty()) { - return add_to_cache(v, ty->get_block_shapes(), starting_multiple_); - } - if(auto *x = dynamic_cast(v)){ - std::set attributes = x->get_parent()->get_attributes(x); - for(auto attr: attributes){ - if(attr.get_kind() == ir::multiple_of){ - return add_to_cache(x, {attr.get_value()}, starting_multiple_); - } - if(attr.get_kind() == ir::aligned){ - ir::type* ty = x->get_type()->get_pointer_element_ty(); - int nbits = ty->get_primitive_size_in_bits(); - int nbytes = std::max(nbits / 8, 1); - return add_to_cache(x, {attr.get_value() / nbytes}, starting_multiple_); - } - } - } - return add_to_cache(v, {1}, starting_multiple_); -} - -unsigned get_max_multiple(int val){ - if(val == 0) return 1 << 31; - if(val % 128 == 0) return 128; - if(val % 64 == 0) return 64; - if(val % 32 == 0) return 32; - if(val % 16 == 0) return 16; - if(val % 8 == 0) return 8; - if(val % 4 == 0) return 4; - if(val % 2 == 0) return 2; - return 1; -} - -std::vector align::populate_starting_multiple(ir::value *v){ - if(starting_multiple_.find(v) != starting_multiple_.end()) - return starting_multiple_.at(v); - if(auto *x = dynamic_cast(v)){ - std::vector multiple_of = 
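To see how the starting-multiple rules above compose, consider an index expression of the form pid * 64 + arange(0, 16) (Triton-level notation, used only for intuition): the unknown scalar contributes multiple 1, the constant contributes get_max_multiple(64) == 64, and a range starting at 0 contributes 1 << 31, i.e. it is maximally aligned (handled just below). A sketch of the arithmetic, using std::gcd in place of the file-local gcd:

#include <cassert>
#include <numeric>

int main() {
  unsigned m_pid = 1;            // unknown scalar
  unsigned m_c64 = 64;           // get_max_multiple(64)
  unsigned m_range = 1u << 31;   // range starting at 0
  unsigned m_mul = m_pid * m_c64;            // is_int_mult(): lhs * rhs
  unsigned m_add = std::gcd(m_mul, m_range); // is_int_add_sub(): gcd
  assert(m_add == 64); // every block of offsets starts at a multiple of 64
  return 0;
}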
x->get_metadata(ir::metadata::multiple_of); - if(!multiple_of.empty()) - return add_to_cache(x, multiple_of, starting_multiple_); - } - if(auto *x = dynamic_cast(v)) - return populate_starting_multiple_cast(x); - if(auto *x = dynamic_cast(v)) - return populate_starting_multiple_binop(x); - if(auto *x = dynamic_cast(v)) - return add_to_cache(x, {get_max_multiple(x->get_value())}, starting_multiple_); - if(auto *x = dynamic_cast(v)) - return add_to_cache(x, {get_max_multiple(x->get_first()->get_value())}, starting_multiple_); - if(auto *x = dynamic_cast(v)) - return populate_starting_multiple_gep(x); - if(auto *x = dynamic_cast(v)) - return populate_starting_multiple_splat(x); - if(auto *x = dynamic_cast(v)) - return populate_starting_multiple_reshape(x); - if(auto *x = dynamic_cast(v)) - return populate_starting_multiple_dequantize(x); - if(auto *x = dynamic_cast(v)) - return populate_starting_multiple_broadcast(x); - if(auto *x = dynamic_cast(v)) - return populate_starting_multiple_phi(x); - return populate_starting_multiple_default(v); -} - - -unsigned align::get(ir::value *v, unsigned ax) const { - unsigned starting_multiple = starting_multiple_.at(v)[ax]; - unsigned max_contiguous = max_contiguous_.at(v)[ax]; - return std::min(starting_multiple, max_contiguous); -} - -std::vector align::contiguous(ir::value* v) const { - return max_contiguous_.at(v); -} - -std::vector align::get_cst_info(ir::value* v) const { - return is_constant_.at(v); -} - - -void align::populate(ir::value *v) { - populate_is_constant(v); - populate_starting_multiple(v); - populate_max_contiguous(v); -} - -void align::run(ir::module &mod) { - ir::for_each_value(mod, [this](ir::value* v) { populate(v); } ); -// ir::for_each_value(mod, [this](ir::value* v) { -// if(dynamic_cast(v) || dynamic_cast(v)) -// std::cout << "ALIGN: " << v->get_name() << " " << max_contiguous_.at(v)[0] << " " << max_contiguous_.at(v)[1] << std::endl; -// }); -} - - -} -} -} diff --git a/lib/codegen/analysis/allocation.cc b/lib/codegen/analysis/allocation.cc deleted file mode 100644 index f842c0f61766..000000000000 --- a/lib/codegen/analysis/allocation.cc +++ /dev/null @@ -1,103 +0,0 @@ -#include -#include -#include "triton/codegen/analysis/layout.h" -#include "triton/codegen/analysis/allocation.h" -#include "triton/codegen/analysis/liveness.h" -#include "triton/ir/utils.h" - -namespace triton{ -namespace codegen{ -namespace analysis{ - - -void allocation::run(ir::module &mod) { - using std::max; - using std::min; - typedef std::multimap triples_map_type; - - std::vector I; - for(auto x: liveness_->get()) - I.push_back(x.first); - std::vector J = I; - - triples_map_type H; - H.insert({0, segment{0, INT_MAX}}); - - std::vector V; - std::map starts; - while(!J.empty()){ - auto h_it = H.begin(); - unsigned w = h_it->first; - segment xh = h_it->second; - H.erase(h_it); - auto j_it = std::find_if(J.begin(), J.end(), [&](shared_layout* JJ){ - segment xj = liveness_->get(JJ); - bool res = xj.intersect(xh); - for(auto val: H) - res = res && !val.second.intersect(xj); - return res; - }); - if(j_it != J.end()){ - unsigned size = (*j_it)->get_size(); - segment xj = liveness_->get(*j_it); - starts[*j_it] = w; - H.insert({w + size, segment{max(xh.start, xj.start), min(xh.end, xj.end)}}); - if(xh.start < xj.start) - H.insert({w, segment{xh.start, xj.end}}); - if(xj.end < xh.end) - H.insert({w, segment{xj.start, xh.end}}); - V.push_back(*j_it); - J.erase(j_it); - } - } - // Build interference graph - std::map> interferences; - for(shared_layout* x: V) - 
for(shared_layout* y: V){ - if(x == y) - continue; - unsigned X0 = starts[x], Y0 = starts[y]; - unsigned NX = x->get_size(); - unsigned NY = y->get_size(); - segment XS = {X0, X0 + NX}; - segment YS = {Y0, Y0 + NY}; - if(liveness_->get(x).intersect(liveness_->get(y)) - && XS.intersect(YS)) - interferences[x].insert(y); - } - // Initialize colors - std::map colors; - for(shared_layout* X: V) - colors[X] = (X==V[0])?0:-1; - // First-fit graph coloring - std::vector available(V.size()); - for(shared_layout* x: V){ - // Non-neighboring colors are available - std::fill(available.begin(), available.end(), true); - for(shared_layout* Y: interferences[x]){ - int color = colors[Y]; - if(color >= 0) - available[color] = false; - } - // Assigns first available color - auto It = std::find(available.begin(), available.end(), true); - colors[x] = std::distance(available.begin(), It); - } - // Finalize allocation - for(shared_layout* x: V){ - unsigned Adj = 0; - for(shared_layout* y: interferences[x]) - Adj = std::max(Adj, starts[y] + y->get_size()); - offsets_[x] = starts[x] + colors[x] * Adj; - } - // Save maximum size of induced memory space - allocated_size_ = 0; - for(shared_layout* x: V){ - allocated_size_ = std::max(allocated_size_, starts[x] + x->get_size()); - // std::cout << "start: " << starts[x] << " | end: " << starts[x] + x->get_size() << std::endl; - } -} - -} -} -} diff --git a/lib/codegen/analysis/axes.cc b/lib/codegen/analysis/axes.cc deleted file mode 100644 index 9e941fee636d..000000000000 --- a/lib/codegen/analysis/axes.cc +++ /dev/null @@ -1,174 +0,0 @@ -#include "triton/codegen/analysis/axes.h" -#include "triton/ir/utils.h" -#include "triton/ir/instructions.h" -#include "triton/ir/type.h" -#include - - -namespace triton{ -namespace codegen{ -namespace analysis{ - -axes::axes() {} - -void axes::update_graph_reduce(ir::instruction *i) { - auto* red = static_cast(i); - unsigned axis = red->get_axis(); - ir::value *arg = red->get_operand(0); - auto in_shapes = arg->get_type()->get_block_shapes(); - unsigned current = 0; - for(unsigned d = 0; d < in_shapes.size(); d++){ - if(d == axis) - continue; - graph_.add_edge({i, current++}, {arg, d}); - } -} - -void axes::update_graph_reshape(ir::instruction *i) { - auto* reshape = static_cast(i); - // operands - ir::value *op = reshape->get_operand(0); - // shapes - auto op_shapes = op->get_type()->get_block_shapes(); - auto res_shapes = reshape->get_type()->get_block_shapes(); - // construct edges - unsigned current = 0; - bool is_skewed = false; - for(unsigned d = 0; d < res_shapes.size(); d ++){ - bool same_shape = res_shapes[d] == op_shapes[current]; - // either add edge between axis or just add a node in the graph - if(!is_skewed && same_shape) - graph_.add_edge({i, d}, {op, current++}); - else - graph_.add_edge({i, d}, {i, d}); - // reshaping is skewed - if(res_shapes[d] > 1 && !same_shape) - is_skewed = true; - } -} - -void axes::update_graph_trans(ir::instruction *i) { - auto *trans = static_cast(i); - ir::value *op = trans->get_operand(0); - auto perm = trans->get_perm(); - // add edge between axis perm[d] and axis d - for(unsigned d = 0; d < perm.size(); d++) - graph_.add_edge({i, perm[d]}, {op, d}); -} - -void axes::update_graph_dequantize(ir::instruction *i) { - auto *dequantize = static_cast(i); - auto shapes = dequantize->get_type()->get_block_shapes(); - ir::value *op = dequantize->get_operand(0); - - // add edge except the last axis - for(unsigned d = 0; d < shapes.size() - 1; d ++){ - graph_.add_edge({i, d}, {op, d}); - } -} - 
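The colouring loop in the legacy allocation pass above is plain first-fit graph colouring over the interference graph: each buffer takes the lowest colour not used by a neighbour, so buffers whose live and address ranges never overlap can end up sharing an offset. A minimal standalone illustration with three buffers, where 0-1 and 1-2 interfere but 0 and 2 do not:

#include <algorithm>
#include <cassert>
#include <map>
#include <set>
#include <vector>

int main() {
  std::map<int, std::set<int>> interferences = {{0, {1}}, {1, {0, 2}}, {2, {1}}};
  std::map<int, int> colors = {{0, 0}, {1, -1}, {2, -1}}; // first node seeded with 0
  std::vector<bool> available(3);
  for (int x = 0; x < 3; ++x) {
    std::fill(available.begin(), available.end(), true);
    for (int y : interferences[x])
      if (colors[y] >= 0)
        available[colors[y]] = false;
    colors[x] = std::find(available.begin(), available.end(), true) -
                available.begin();
  }
  assert(colors[0] == 0 && colors[1] == 1 && colors[2] == 0); // 0 and 2 share
  return 0;
}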
-void axes::update_graph_broadcast(ir::instruction *i) { - auto *broadcast = static_cast(i); - auto shapes = broadcast->get_type()->get_block_shapes(); - ir::value *op = broadcast->get_operand(0); - ir::type *op_ty = op->get_type(); - const auto& op_shapes = op_ty->get_block_shapes(); - // add edge between non-broadcast axes - for(unsigned d = 0; d < shapes.size(); d ++) - if(op_shapes[d] == shapes[d]) - graph_.add_edge({i, d}, {op, d}); -} - -void axes::update_graph_dot(ir::instruction *i) { - auto *dot = static_cast(i); - auto shapes = dot->get_type()->get_block_shapes(); - ir::value *A = dot->get_operand(0); - ir::value *B = dot->get_operand(1); - ir::value *D = dot->get_operand(2); - // add edges between result and accumulator - for(unsigned d = 0; d < shapes.size(); d++) - graph_.add_edge({dot, d}, {D, d}); -} - -void axes::update_graph_elementwise(ir::instruction *i, - bool is_masked_load_async) { - if(i->get_num_operands() == 0) - return; - ir::value *op = i->get_operand(0); - if(!op->get_type()->is_block_ty()) - return; - auto rank = op->get_type()->get_tile_rank(); - for(unsigned d = 0; d < rank; d++) { - // If we are dealing with a masked async load we need to attach the - // dimensions so we match the behaviour of the copy_to_shared instruction - // which async masked load replaces. - if (is_masked_load_async) { - graph_.add_edge({i, d}, {i, d}); - } - - for(ir::value* opx: i->ops()) - for(ir::value* opy: i->ops()) { - if(!is_masked_load_async && !i->get_type()->is_void_ty()) - graph_.add_edge({i, d}, {opx, d}); - graph_.add_edge({opx, d}, {opy, d}); - } - } -} - -void axes::update_graph_no_edge(ir::instruction *i) { - if(!i->get_type()->is_block_ty()) - return; - auto rank = i->get_type()->get_tile_rank(); - for(unsigned d = 0; d < rank; d++) - graph_.add_edge({i, d}, {i, d}); -} - -void axes::update_graph(ir::instruction *i) { - switch (i->get_id()) { - case ir::INST_REDUCE: return update_graph_reduce(i); - case ir::INST_RESHAPE: return update_graph_reshape(i); - case ir::INST_SPLAT: return update_graph_no_edge(i); - case ir::INST_CAT: return update_graph_elementwise(i, true); - case ir::INST_TRANS: return update_graph_trans(i); - case ir::INST_DEQUANTIZE: return update_graph_dequantize(i); - case ir::INST_BROADCAST: return update_graph_broadcast(i); - case ir::INST_DOT: return update_graph_dot(i); - case ir::INST_COPY_TO_SHARED: return update_graph_no_edge(i); - case ir::INST_MASKED_LOAD_ASYNC: return update_graph_elementwise(i, true); - case ir::INST_COPY_FROM_SHARED: return update_graph_no_edge(i); - case ir::INST_CVT_LAYOUT: return update_graph_no_edge(i); - default: return update_graph_elementwise(i); - } - return; -} - - -int axes::get(ir::value *value, unsigned dim) { - return axes_.at({value, dim}); -} - -std::vector axes::get(ir::value *value) { - std::vector result; - for(size_t d = 0; d < value->get_type()->get_tile_rank(); d++) - result.push_back(this->get(value, d)); - return result; -} - -void axes::run(ir::module &mod) { - // make graph - graph_.clear(); - axes_.clear(); - ir::for_each_instruction(mod, [this](ir::instruction *x) { - update_graph(x); - }); - // find connected components - graph_.connected_components(nullptr, &axes_); - std::set uniq; - for(auto x: axes_) - uniq.insert(x.second); -} - -} -} - -} diff --git a/lib/codegen/analysis/layout.cc b/lib/codegen/analysis/layout.cc deleted file mode 100644 index 5eda98c4b7c4..000000000000 --- a/lib/codegen/analysis/layout.cc +++ /dev/null @@ -1,722 +0,0 @@ -#include -#include -#include -#include 
"triton/codegen/analysis/axes.h" -#include "triton/codegen/analysis/align.h" -#include "triton/codegen/analysis/layout.h" -#include "triton/ir/function.h" -#include "triton/ir/module.h" -#include "triton/ir/utils.h" -// #include "triton/ir/type.h" - -namespace triton{ -namespace codegen{ -namespace analysis{ - -/* -------------------------------- * - * Helper Functions * - * -------------------------------- */ - -inline unsigned clamp(unsigned x, unsigned a, unsigned b) { - unsigned lo = std::min(a, b); - unsigned hi = std::max(a, b); - return std::min(std::max(x, lo), hi); -} - -inline bool is_hmma_c(ir::value *v, int sm){ - bool result = false; - if(auto *x = dynamic_cast(v)){ - ir::value *a = x->get_operand(0); - ir::type *a_ty = a->get_type(); - ir::value *b = x->get_operand(1); - ir::type *b_ty = b->get_type(); - result = (a_ty->get_scalar_ty()->is_fp16_ty() && b_ty->get_scalar_ty()->is_fp16_ty()) || - (a_ty->get_scalar_ty()->is_bf16_ty() && b_ty->get_scalar_ty()->is_bf16_ty()) || - (a_ty->get_scalar_ty()->is_fp32_ty() && b_ty->get_scalar_ty()->is_fp32_ty() && - x->allow_tf32() && sm >= 80) || - (a_ty->get_scalar_ty()->is_integer_ty(8) && b_ty->get_scalar_ty()->is_integer_ty(8) && - sm >= 80); - } - return result; -} - -static mma_layout::TensorCoreType get_mma_type(ir::value *v) { - mma_layout::TensorCoreType mma_type; - if (auto* dot = dynamic_cast(v)) { - ir::value* a = dot->get_operand(0); - ir::value* b = dot->get_operand(1); - ir::type* a_ty = a->get_type(); - ir::type* b_ty = b->get_type(); - ir::type* c_ty = v->get_type(); - - if (c_ty->get_scalar_ty()->is_fp32_ty()) { - // floating point tensor cores - if (a_ty->get_scalar_ty()->is_fp16_ty() && b_ty->get_scalar_ty()->is_fp16_ty()) { - mma_type = mma_layout::FP32_FP16_FP16_FP32; - return mma_type; - } - if (a_ty->get_scalar_ty()->is_bf16_ty() && b_ty->get_scalar_ty()->is_bf16_ty()) { - mma_type = mma_layout::FP32_BF16_BF16_FP32; - return mma_type; - } - if (a_ty->get_scalar_ty()->is_fp32_ty() && b_ty->get_scalar_ty()->is_fp32_ty() - && dot->allow_tf32()) { - mma_type = mma_layout::FP32_TF32_TF32_FP32; - return mma_type; - } - } else if (c_ty->get_scalar_ty()->is_integer_ty(32)) { - // throw std::runtime_error("integer tensor cores are not yet supported"); - // // integer tensor cores - // if (a_ty->get_scalar_ty()->is_integer_ty(1) && b_ty->get_scalar_ty()->is_integer_ty(1)) { - // mma_type = mma_layout::INT32_INT1_INT1_INT32; - // return mma_type; - // } - // if (a_ty->get_scalar_ty()->is_integer_ty(4) && b_ty->get_scalar_ty()->is_integer_ty(4)) { - // mma_type = mma_layout::INT32_INT4_INT4_INT32; - // return mma_type; - // } - if (a_ty->get_scalar_ty()->is_integer_ty(8) && b_ty->get_scalar_ty()->is_integer_ty(8)) { - mma_type = mma_layout::INT32_INT8_INT8_INT32; - return mma_type; - } - } - } - return mma_layout::NOT_APPLICABLE; -} - -inline void extract_io_use(ir::value *v, std::set& result) { - for(ir::user* u: v->get_users()){ - auto i = dynamic_cast(u); - if(i && i->get_pointer_operand() == v) - result.insert(v); - } -} - -inline void extract_dot_use(ir::value *v, ir::value*& result, size_t n) { - for(ir::user* u: v->get_users()){ - auto i = dynamic_cast(u); - if(i && i->get_operand(n) == v) - result = v; - } -} - -inline void extract_hmma_dot_use(ir::value *v, ir::value*& result, size_t n, int sm) { - for(ir::user* u: v->get_users()){ - auto i = dynamic_cast(u); - if(i && is_hmma_c(i, sm) && i->get_operand(n) == v) { - result = i; - } - } -} - - -inline bool is_trans(ir::value *v) { - if(dynamic_cast(v)) { - return 
true; - } - if(auto *phi = dynamic_cast(v)) { - bool result = true; - for(ir::value *op: phi->ops()) - result = result && is_trans(op); - return result; - } - return false; -} - - -/* -------------------------------- * - * Layout Visitor * - * -------------------------------- */ - -void layout_visitor::visit_layout(data_layout *layout) { - layout->accept(this); -} - - -/* -------------------------------- * - * Base Data Layout * - * -------------------------------- */ - -data_layout::data_layout(id_t id, - const std::vector &axes, - const std::vector &shape, - const std::vector &values, - analysis::align* align): id_(id), axes_(axes), shape_(shape), values_(values) { - // io pointer - std::set ptr; - for(ir::value* v: values_) - extract_io_use(v, ptr); - order_.resize(axes_.size()); - std::iota(order_.begin(), order_.end(), 0); - std::vector max_contiguous; - for(ir::value* p: ptr){ - std::vector curr = align->contiguous(p); - if(curr.size() > max_contiguous.size()) - max_contiguous = curr; - else if(curr.size() == max_contiguous.size()){ - if(*std::max_element(curr.begin(), curr.end()) > *std::max_element(max_contiguous.begin(), max_contiguous.end())) - max_contiguous = curr; - } - } - if(max_contiguous.size() > 0){ - std::sort(order_.begin(), order_.end(), [&](unsigned a, unsigned b) { - return max_contiguous[a] > max_contiguous[b]; - }); -// std::cout << max_contiguous[0] << " " << max_contiguous[1] << std::endl; -// std::cout << order_[0] << " " << order_[1] << std::endl; - } -} - -int data_layout::find_axis(int to_find) const { - auto it = std::find(axes_.begin(), axes_.end(), to_find); - if(it == axes_.end()) - return -1; - return std::distance(axes_.begin(), it); -} - - -distributed_layout::distributed_layout(id_t id, - const std::vector &axes, - const std::vector &shape, - const std::vector &values, - analysis::align* align): data_layout(id, axes, shape, values, align) -{ } - -/* -------------------------------- * - * MMA Layout * - * -------------------------------- */ - -mma_layout::mma_layout(size_t num_warps, - const std::vector& axes, - const std::vector& shape, - const std::vector &values, - analysis::align* align, target* tgt, - shared_layout *layout_a, shared_layout *layout_b, - ir::value *dot): distributed_layout(MMA, axes, shape, values, align) { - tensor_core_type_ = get_mma_type(dot); - /* fragments per warp */ - // try to make things as square as possible to maximize data re-use - if(tgt->as_nvidia()->sm() < 80){ - fpw_ = {2, 2, 1}; - auto ord_a = layout_a->get_order(); - auto ord_b = layout_b->get_order(); - bool is_a_row = ord_a[0] != 0; - bool is_b_row = ord_b[0] != 0; - bool is_a_vec4 = !is_a_row && (layout_a->get_shape()[ord_a[0]] <= 16); - bool is_b_vec4 = is_b_row && (layout_b->get_shape()[ord_b[0]] <= 16); - int pack_size_0 = (is_a_row || is_a_vec4) ? 1 : 2; - int pack_size_1 = (is_b_row && !is_b_vec4) ? 
2 : 1; - rep_ = {2*pack_size_0, 2*pack_size_1, 1}; - spw_ = {fpw_[0]*4*rep_[0], fpw_[1]*4*rep_[1], 1}; - contig_per_thread_ = {1, 1}; - order_ = {0, 1}; - } - else{ - spw_ = mma_instr_shape_.at(tensor_core_type_); // e.g., {16, 8, 16} for f32.f16.f16.f32 - contig_per_thread_ = {1, 2}; - order_ = {1, 0}; - } - - /* warps per tile */ - wpt_ = {1, 1, 1}; - // try to make warp-level tiles as square as possible to maximize data re-use - if (tgt->as_nvidia()->sm() < 80) { - std::vector wpt_nm1; - do{ - wpt_nm1 = wpt_; - if(wpt_[0] * wpt_[1] * wpt_[2] < num_warps) - wpt_[0] = clamp(wpt_[0]*2, 1, shape_[0] / spw_[0]); - if(wpt_[0] * wpt_[1] * wpt_[2] < num_warps) - wpt_[1] = clamp(wpt_[1]*2, 1, shape_[1] / spw_[1]); - }while(wpt_nm1 != wpt_); - } else { - bool changed = false; - // try to have a warp own entire rows of the output - // this makes it easier to fuse multiple mmas by fusing - // registers - bool one_warp_per_row = false; - for(ir::value* v: values) - for(ir::user* u: v->get_users()){ - auto* dot = dynamic_cast(u); - auto* cts = dynamic_cast(u); - if((dot && dot->get_operand(2)!=v) || !layout_a->to_shared() || cts) - one_warp_per_row = shape[0] / spw_[0] >= num_warps; - } - // std::cout << one_warp_per_row << std::endl; - - if(one_warp_per_row){ - wpt_[1] = 1; - wpt_[0] = num_warps; - } - else{ - do { - changed = false; - if (wpt_[0] * wpt_[1] * wpt_[2] >= num_warps) - break; - if (shape_[0] / spw_[0] / wpt_[0] >= shape_[1] / (spw_[1]*2) / wpt_[1]) { - if (wpt_[0] < shape_[0] / spw_[0]) { - wpt_[0] *= 2; - changed = true; - } - } else { - if (wpt_[1] < shape_[1] / (spw_[1]*2)) { - wpt_[1] *= 2; - changed = true; - } - } - } while(changed); - } - } - - // std::cout << wpt_[0] << " " << wpt_[1] << std::endl; - - /* shape per block */ - shape_per_cta_ = {spw_[0]*wpt_[0], spw_[1]*wpt_[1], 1}; -} - - -/* -------------------------------- * - * Scanline Layout * - * -------------------------------- */ - -scanline_layout::scanline_layout(size_t num_warps, - const std::vector& axes, - const std::vector& shape, - const std::vector &values, - analysis::align* align, target *tgt): distributed_layout(SCANLINE, axes, shape, values, align){ - unsigned size = std::accumulate(shape_.begin(), shape_.end(), 1, std::multiplies()); - unsigned num_threads = tgt->is_gpu() ? 
num_warps * 32 : 1; - nts_.resize(shape_.size()); - mts_.resize(shape_.size()); - bool is_dot = std::any_of(values.begin(), values.end(), - [&](ir::value* v) { return dynamic_cast(v); }); - - std::vector ptrs; - for(ir::value *v: values) - for(ir::user *usr: v->get_users()) - if(auto *io = dynamic_cast(usr)){ - if(ptrs.empty() || ptrs[0]->get_type()->get_tile_rank() <= io->get_pointer_operand()->get_type()->get_tile_rank()) - ptrs.push_back(io->get_pointer_operand()); - } - - unsigned i = order_[0]; - int contiguous = 1; - for(ir::value* ptr: ptrs){ - int nbits = ptr->get_type()->get_pointer_element_ty()->get_scalar_ty()->get_primitive_size_in_bits(); - contiguous = std::max(contiguous, std::min(align->get(ptr, i), 128 / nbits)); - } - - nts_[i] = clamp(size / num_threads, 1, std::min(contiguous, shape_[i])); - mts_[i] = clamp(num_threads, 1, shape_[i] / nts_[i]); - size /= shape_[i]; - num_threads /= mts_[i]; - if(is_dot) - nts_[order_[1]] = clamp(size / num_threads, 1, std::min(4, shape_[order_[1]])); - for(size_t d = 1; d < shape_.size(); d++){ - i = order_[d]; - if(d > 1 || !is_dot) - nts_[i] = 1; - mts_[i] = clamp(num_threads, 1, shape_[i] / nts_[i]); - num_threads = num_threads / mts_[i]; - } - - shape_per_cta_.resize(shape_.size()); - for(size_t d = 0; d < shape_.size(); d++) - shape_per_cta_[d] = mts_[d]*nts_[d]; -} - - -/* -------------------------------- * - * Shared Layout * - * -------------------------------- */ - -bool shared_layout::is_loop_latch(ir::phi_node *phi, ir::instruction *terminator){ - if(phi->get_parent() != terminator->get_parent()) - return false; - if(auto *br = dynamic_cast(terminator)) - return br->get_true_dest() == phi->get_parent() - || br->get_false_dest() == phi->get_parent(); - else if(dynamic_cast(terminator)) - return false; - else - throw std::runtime_error("unreachable"); -} - - -void shared_layout::extract_double_bufferable(ir::value *v, std::shared_ptr& res) { - auto* phi = dynamic_cast(v); - if(!phi || phi->get_num_incoming() != 2) - return; - ir::basic_block *block_0 = phi->get_incoming_block(0); - ir::basic_block *block_1 = phi->get_incoming_block(1); - ir::instruction *terminator_0 = block_0->get_inst_list().back(); - ir::instruction *terminator_1 = block_1->get_inst_list().back(); - bool is_latch_0 = is_loop_latch(phi, terminator_0); - bool is_latch_1 = is_loop_latch(phi, terminator_1); - ir::value *value_0 = phi->get_incoming_value(0); - ir::value *value_1 = phi->get_incoming_value(1); - ir::instruction *i_0 = dynamic_cast(value_0); - ir::instruction *i_1 = dynamic_cast(value_1); - if(!(i_0 && !i_1) && - !(dynamic_cast(i_0) && dynamic_cast(i_1)) && - !(dynamic_cast(i_0) && dynamic_cast(i_1))) - return; - if(is_latch_1) - res.reset(new double_buffer_info_t{value_0, value_1, phi}); - if(is_latch_0) - res.reset(new double_buffer_info_t{value_1, value_0, phi}); -} - -static bool is_smem_in(ir::value* v, const ir::basic_block* bb) { - if (ir::instruction *instr = dynamic_cast(v)) { - if (instr->get_parent() != bb) - return false; - if (dynamic_cast(v) || - dynamic_cast(v)) { - return true; - } - } - return false; -} - -/// param: -/// value_1: next_value -static bool is_multistage_pipe_phi(ir::phi_node* phi, ir::basic_block* bb0, ir::basic_block* bb1, - std::vector& values_0, ir::value*& value_1) { - ir::value* next = phi; - while (auto cphi = dynamic_cast(next)) { - // smem from previous bb & phi/smem from current bb - ir::value* c0 = cphi->get_incoming_value(0); - ir::value* c1 = cphi->get_incoming_value(1); - ir::basic_block *cbb0 = 
cphi->get_incoming_block(0); - ir::basic_block *cbb1 = cphi->get_incoming_block(1); - - if (is_smem_in(c0, cbb0)) { - assert(cbb0 == bb0); - values_0.push_back(c0); - if (auto phi1 = dynamic_cast(c1)) { - next = phi1; - continue; - } else { - if (is_smem_in(c1, cbb1)) { - value_1 = c1; - assert(cbb1 == bb1); - return true; - } else { - return false; - } - } - } else - return false; - } - return false; -} - -void shared_layout::extract_N_bufferable(ir::value *v, std::shared_ptr &res, int &prev_stages) { - auto* phi = dynamic_cast(v); - // if the phi node is nested - if (!phi) - return; - - ir::basic_block *bb0 = phi->get_incoming_block(0); - ir::basic_block *bb1 = phi->get_incoming_block(1); - - std::vector values_0; - ir::value* value_1; - - if (!is_multistage_pipe_phi(phi, bb0, bb1, values_0, value_1)) - return; - - // double-buffer is a special case - if (values_0.size() == 1) - return; - - // compute original values_0 input order - std::map order; - int idx = 0; - for (ir::instruction* instr : *bb0) { - if (std::find(values_0.begin(), values_0.end(), instr) != values_0.end()) - order[static_cast(instr)] = idx++; - } - assert(order.size() == values_0.size() && "order size incorrect"); - - int curr_stages = values_0.size() + 1; - if (curr_stages > prev_stages) { - res.reset(new N_buffer_info_t{values_0, value_1, phi, order}); - prev_stages = curr_stages; - } -} - - -shared_layout::shared_layout(data_layout *arg, - const std::vector& axes, - const std::vector& shape, - const std::vector &values, - ir::type *ty, - analysis::align* align, target *tgt, bool is_tmp) - : data_layout(SHARED, axes, shape, values, align), ty_(ty), tgt_(tgt), is_tmp_(is_tmp){ - - size_ = 0; - arg_layout_ = arg; - - // N-stage buffering - int prev_stages = 0; - for (ir::value *v : values) - extract_N_bufferable(v, N_buffer_, prev_stages); - - // double-buffering - if (!N_buffer_) - for(ir::value *v: values) - extract_double_bufferable(v, double_buffer_); - - // order - std::vector arg_order = arg ? arg->get_order() : std::vector{0}; - order_ = arg_order; - - ir::value* dot_a = nullptr; - ir::value* dot_b = nullptr; - ir::value* hmma_dot_a = nullptr; - ir::value* hmma_dot_b = nullptr; - for(ir::value* v: values){ - extract_dot_use(v, dot_a, 0); - extract_dot_use(v, dot_b, 1); - extract_hmma_dot_use(v, hmma_dot_a, /*op*/0, tgt_->as_nvidia()->sm()); - extract_hmma_dot_use(v, hmma_dot_b, /*op*/1, tgt_->as_nvidia()->sm()); - } - hmma_dot_a_ = hmma_dot_a; - hmma_dot_b_ = hmma_dot_b; - - // Update mma_vec - if (hmma_dot_a_) { - assert(order_.size() == 2); - std::vector mat_shape = mma_layout::mma_mat_shape_.at(get_mma_type(hmma_dot_a_)); - mma_vec_ = order_[0] == 1 ? mat_shape[2] : mat_shape[0]; // k : m - mma_strided_ = order_[0] == 1 ? mat_shape[0] : mat_shape[2]; - - // for now, disable swizzle when using lds.8 - if (get_mma_type(hmma_dot_a_) == mma_layout::INT32_INT8_INT8_INT32) - if (order_[0] == 0) // need transpose - allow_swizzle_ = false; - } else if (hmma_dot_b_) { - assert(order_.size() == 2); - std::vector mat_shape = mma_layout::mma_mat_shape_.at(get_mma_type(hmma_dot_b_)); - mma_vec_ = order_[0] == 1 ? mat_shape[1] : mat_shape[2]; // n : k - mma_strided_ = order_[0] == 1 ? 
mat_shape[2] : mat_shape[1]; - - // for now, disable swizzle when using lds.8 - if (get_mma_type(hmma_dot_b_) == mma_layout::INT32_INT8_INT8_INT32) - if (order_[0] == 1) // need transpose - allow_swizzle_ = false; - } - - // size - size_ = ty_->get_primitive_size_in_bits() / 8; - for(auto s: shape_) - size_ *= s; - if(double_buffer_) - size_ *= 2; - if (N_buffer_) { - size_ *= (N_buffer_->firsts.size() + 1); - } -} - -int shared_layout::get_num_stages() const { - if (double_buffer_) - return 2; - if (N_buffer_) - return N_buffer_->firsts.size() + 1; - return 1; -} - -size_t shared_layout::get_per_stage_elements() const { - return get_per_stage_size()/(ty_->get_primitive_size_in_bits()/8); -} - -/* -------------------------------- * - * ---- Layouts Inference Pass ---- * - * -------------------------------- */ - -layouts::layouts(analysis::axes *axes, analysis::align *align, size_t num_warps, target* tgt) - : axes_(axes), align_(align), num_warps_(num_warps), tgt_(tgt){ } - - -void layouts::connect(ir::value *x, ir::value *y) { - if(x == y) - return; - if(!x->get_type()->is_block_ty()) - return; - if(!y->get_type()->is_block_ty()) - return; - std::vector x_axes = axes_->get(x); - std::vector y_axes = axes_->get(y); - std::set sx_axes(x_axes.begin(), x_axes.end()); - std::set sy_axes(y_axes.begin(), y_axes.end()); - std::set common; - std::set_intersection(sx_axes.begin(), sx_axes.end(), - sy_axes.begin(), sy_axes.end(), - std::inserter(common, common.begin())); - graph_.add_edge(x, x); - graph_.add_edge(y, y); - if(!common.empty()) - graph_.add_edge(x, y); -} - -void layouts::make_graph(ir::instruction *i) { - for(ir::value* opx: i->ops()) - for(ir::value* opy: i->ops()){ - connect(i, opx); - connect(opx, opy); - } -} - -void layouts::create(size_t id, const std::vector& values) { -// if(layouts_.find(id) != layouts_.end()) -// return; - auto it_hmma_c = std::find_if(values.begin(), values.end(), - [&](ir::value* v){ return is_hmma_c(v, tgt_->as_nvidia()->sm()); }); - auto cmp = [](ir::value* x, ir::value *y) { - std::pair xx = {x->get_type()->get_tile_rank(), x->get_type()->get_tile_num_elements()}; - std::pair yy = {y->get_type()->get_tile_rank(), y->get_type()->get_tile_num_elements()}; - return xx < yy; - }; - std::vector lvalue = values; - std::remove_if(lvalue.begin(), lvalue.end(), [&](ir::value* v) { return dynamic_cast(v); }); - ir::value *largest = *std::max_element(lvalue.begin(), lvalue.end(), cmp); - const auto& axes = axes_->get(largest); - const auto& shapes = largest->get_type()->get_block_shapes(); - auto it_cts = std::find_if(values.begin(), values.end(), [](ir::value* v) { - return dynamic_cast(v) || - dynamic_cast(v); - }); - // type - if(it_hmma_c != values.end()){ - ir::instruction *dot = (ir::instruction*)*it_hmma_c; - ir::value *a = dot->get_operand(0); - ir::value *b = dot->get_operand(1); - create(groups_.at(a), values_.at(groups_.at(a))); - create(groups_.at(b), values_.at(groups_.at(b))); - layouts_[id] = new mma_layout(num_warps_, axes, shapes, values, align_, tgt_, - (shared_layout*)layouts_.at(groups_.at(a)), - (shared_layout*)layouts_.at(groups_.at(b)), - dot); - } - else if(it_cts != values.end()){ - ir::instruction *cts = (ir::instruction*)*it_cts; - ir::value *arg = cts->get_operand(0); - create(groups_.at(arg), values_.at(groups_.at(arg))); - layouts_[id] = new shared_layout(get(arg), axes, shapes, values, largest->get_type()->get_scalar_ty(), align_, tgt_); - } - else{ - layouts_[id] = new scanline_layout(num_warps_, axes, shapes, values, align_, tgt_); 
- } -} - -// layout checkers -bool layouts::is_scanline(ir::instruction *i) { - return this->get(i->get_operand(0))->to_scanline() != nullptr; -} - -bool layouts::is_coalesced_scanline(ir::instruction *i) { - if (auto *red = dynamic_cast(i)) { - auto *scanline = this->get(i->get_operand(0))->to_scanline(); - return scanline && scanline->get_order()[0] == red->get_axis(); - } - return false; -} - -bool layouts::is_mma(ir::instruction *i) { - return this->get(i->get_operand(0))->to_mma() != nullptr; -} - -bool layouts::is_a100_mma(ir::instruction *i) { - if (auto *red = dynamic_cast(i)) { - return is_mma(red) && (tgt_->as_nvidia()->sm() >= 80) && - (red->get_axis() == 1); - } - return false; -} - -void layouts::create_tmp_layout(size_t id, data_layout *arg, - const std::vector &axes, - const std::vector &shape, - ir::instruction *i, bool is_index) { - ir::type *ty = is_index ? ir::type::get_int32_ty(i->get_type()->get_context()) - : i->get_type()->get_scalar_ty(); - layouts_[id] = new shared_layout(arg, axes, shape, {i}, ty, align_, tgt_, true); - if (is_index) { - tmp_index_[i] = id; - } else { - tmp_[i] = id; - } -} - -void layouts::run(ir::module &mod) { - // make graph - graph_.clear(); - layouts_.clear(); - groups_.clear(); - - ir::for_each_instruction(mod, [this](ir::instruction* i) { - make_graph(i); - }); - - - // connected components - graph_.connected_components(&values_, &groups_); - - // create layouts - for(const auto& x: values_) - create(x.first, x.second); - - // create temporaries - size_t id = values_.size(); - ir::for_each_instruction(mod, [this, &id](ir::instruction* i) { -// std::cout << "layout: " << std::endl; -// i->print(std::cout); - if(auto *red = dynamic_cast(i)) { - ir::value *arg = red->get_operand(0); - distributed_layout *layout = - dynamic_cast(get(arg)); - // shape - auto shapes = arg->get_type()->get_block_shapes(); - unsigned axis = red->get_axis(); - shapes[axis] = - layout->shape_per_cta(axis) / layout->contig_per_thread(axis); - // create layout - id++; - create_tmp_layout(id, layout, axes_->get(arg), shapes, red); - - if (red->with_index()) { - id++; - create_tmp_layout(id, layout, axes_->get(arg), shapes, red, true); - } - } - if(auto *val = dynamic_cast(i)){ - distributed_layout* out_layout = dynamic_cast(get(val)); - distributed_layout* in_layout = dynamic_cast(get(i->get_operand(0))); - size_t dim = val->get_type()->get_tile_rank(); - ir::type::block_shapes_t shape(dim); - for(size_t k = 0; k < dim; k++){ - shape[k] = std::max(in_layout->shape_per_cta(k), - out_layout->shape_per_cta(k)); - } - auto in_ord = in_layout->get_order(); - auto out_ord = out_layout->get_order(); - int in_vec = in_layout->contig_per_thread(in_ord[0]); - int out_vec = out_layout->contig_per_thread(out_ord[0]); - int pad = std::max(in_vec, out_vec); - shape[out_ord[0]] += pad; - id++; - create_tmp_layout(id, out_layout, axes_->get(val), shape, val); - } - if(auto *atom = dynamic_cast(i)){ - id++; - create_tmp_layout(id, nullptr, {}, {1}, atom); - } - }); - -} - -} -} -} diff --git a/lib/codegen/analysis/liveness.cc b/lib/codegen/analysis/liveness.cc deleted file mode 100644 index 535df4eb90af..000000000000 --- a/lib/codegen/analysis/liveness.cc +++ /dev/null @@ -1,124 +0,0 @@ -#include -#include -#include "triton/codegen/analysis/liveness.h" -#include "triton/codegen/analysis/layout.h" -#include "triton/ir/function.h" -#include "triton/ir/module.h" -#include "triton/ir/utils.h" - -namespace triton{ -namespace codegen{ -namespace analysis{ - - -void liveness::run(ir::module 
&mod) { - intervals_.clear(); - - std::map> layouts_map; - for(auto &x: layouts_->get_all()){ - shared_layout* layout = x.second->to_shared(); - if(!layout || layout->is_tmp()) - continue; - for(ir::value* v:layout->get_values()){ - layouts_map[v].insert(layout); - } - } - - - - std::map> live_in; - while(true){ - bool changed = false; - ir::instruction* last_inst = nullptr; - ir::for_each_instruction_backward(mod, [&](ir::instruction* i){ - // gen - std::set gen; - for(ir::value* v: i->ops()) - for(shared_layout* layout: layouts_map[v]) - gen.insert(layout); - // kill - std::set kill; - for(shared_layout* layout: layouts_map[i]) - kill.insert(layout); - // temporaries are handled separately - if(layouts_->has_tmp(i)){ - gen.insert(layouts_->get(layouts_->tmp(i))->to_shared()); - kill.insert(layouts_->get(layouts_->tmp(i))->to_shared()); - } - if(layouts_->has_tmp_index(i)){ - gen.insert(layouts_->get(layouts_->tmp_index(i))->to_shared()); - kill.insert(layouts_->get(layouts_->tmp_index(i))->to_shared()); - } - // live-out - std::set live_out; - std::vector succs = {last_inst}; - if(i == i->get_parent()->get_inst_list().back()) - for(ir::basic_block* succ: i->get_parent()->get_successors()) - succs.push_back(succ->get_inst_list().front()); - for(ir::instruction* succ: succs) - for(shared_layout* layout: live_in[succ]) - if(!layout->is_tmp()) - live_out.insert(layout); - - // new sets - std::set live_out_minus_kill; - std::set_difference(live_out.begin(), live_out.end(), kill.begin(), kill.end(), - std::inserter(live_out_minus_kill, live_out_minus_kill.end())); - std::set new_live_in; - std::set_union(gen.begin(), gen.end(), live_out_minus_kill.begin(), live_out_minus_kill.end(), - std::inserter(new_live_in, new_live_in.end())); - - changed = changed || (new_live_in != live_in[i]); - live_in[i] = new_live_in; - last_inst = i; - }); - if(!changed) - break; - } - - // ir::for_each_instruction(mod, [&](ir::instruction* i){ - // i->print(std::cout); - // std::cout << " live_in: " << live_in[i].size() << std::endl; - // }); - - - - // Assigns index to each instruction - std::map indices; - slot_index index = 0; - ir::for_each_instruction(mod, [&](ir::instruction* instr){ - index += 1; - indices.insert({instr, index}); - }); - - - for(auto &x: layouts_->get_all()){ - shared_layout* layout = x.second->to_shared(); - if(layout) - intervals_[layout] = segment{INT32_MAX, 0}; - } - - for(auto& x: live_in) - for(shared_layout* layout: x.second) - intervals_[layout].start = std::min(intervals_[layout].start, indices[x.first]); - - for(auto& x: live_in) - for(shared_layout* layout: x.second){ - intervals_[layout].end = std::max(intervals_[layout].end, indices[x.first] + 1); - } - - - for(auto &x: layouts_->get_all()) { - shared_layout* layout = x.second->to_shared(); - if(!layout) - continue; - // std::cout << intervals_[layout].start << " " << intervals_[layout].end << std::endl; - } - - - -} - -} -} -} diff --git a/lib/codegen/analysis/swizzle.cc b/lib/codegen/analysis/swizzle.cc deleted file mode 100644 index 08843bbf78df..000000000000 --- a/lib/codegen/analysis/swizzle.cc +++ /dev/null @@ -1,64 +0,0 @@ -#include "triton/codegen/analysis/swizzle.h" -#include "triton/codegen/analysis/layout.h" -#include "triton/codegen/target.h" -#include "triton/ir/type.h" -#include - -namespace triton{ -namespace codegen{ -namespace analysis{ - - -void swizzle::run(ir::module &) { - per_phase_.clear(); - max_phase_.clear(); - - for(auto &x: layouts_->get_all()){ - shared_layout* layout = dynamic_cast(x.second); - 
if(!layout) - continue; - ir::value* mma_dot_a = layout->hmma_dot_a(); - ir::value* mma_dot_b = layout->hmma_dot_b(); - - if(!mma_dot_a && !mma_dot_b){ - per_phase_[layout] = 1; - max_phase_[layout] = 1; - vec_[layout] = 1; - continue; - } - auto ord = layout->get_order(); - scanline_layout* in_layout = dynamic_cast(layout->get_arg_layout()); - int per_phase = 1; - int dtsize = layout->get_type()->get_scalar_ty()->get_primitive_size_in_bits() / 8; - if(in_layout) - per_phase = std::max(128 / (in_layout->mts(ord[0])*in_layout->nts(ord[0])*dtsize), 1); - else - per_phase = 1; - if(tgt_->as_nvidia() && tgt_->as_nvidia()->sm() < 80){ - int inner = mma_dot_a ? 0 : 1; - per_phase_[layout] = per_phase; - max_phase_[layout] = (ord[inner] == 1 ? 8 : 4) / per_phase_[layout]; - if(mma_dot_a) - vec_[layout] = 2*layouts_->get(mma_dot_a)->to_mma()->rep(0); - else - vec_[layout] = 2*layouts_->get(mma_dot_b)->to_mma()->rep(1); - } - else { - if (!layout->allow_swizzle()) { - per_phase_[layout] = 1; - max_phase_[layout] = 1; - vec_[layout] = 1; - } else { - per_phase_[layout] = per_phase; - max_phase_[layout] = layout->get_mma_strided() / per_phase_[layout]; - vec_[layout] = layout->get_mma_vec(); - } - } - } -} - -} -} -} - - diff --git a/lib/codegen/extern_lib.cc b/lib/codegen/extern_lib.cc deleted file mode 100644 index 0a1f165ea443..000000000000 --- a/lib/codegen/extern_lib.cc +++ /dev/null @@ -1,63 +0,0 @@ -#include "triton/codegen/extern_lib.h" - -#include "llvm/IR/Constants.h" -#include "llvm/IR/LegacyPassManager.h" -#include "llvm/IR/Metadata.h" -#include "llvm/IR/Type.h" -#include "llvm/Linker/Linker.h" -#include "llvm/Transforms/IPO/PassManagerBuilder.h" -#include "triton/codegen/pass.h" - -namespace triton { - -namespace codegen { - -std::unique_ptr ExternLib::load(llvm::LLVMContext& ctx) { - llvm::SMDiagnostic err; - auto mod = llvm::parseIRFile(this->path_, err, ctx); - if (!mod) { - throw std::runtime_error("Failed to load extern lib " + this->name_ + - " at " + this->path_); - } - return mod; -} - -void ExternLib::link(std::unique_ptr& llvm, - std::unique_ptr& mod) { - // Set triple and data layout to match the target module - mod->setTargetTriple(llvm->getTargetTriple()); - mod->setDataLayout(llvm->getDataLayout()); - if (llvm::Linker::linkModules(*llvm, std::move(mod))) { - throw std::runtime_error("Failed to link extern lib " + this->name_ + - " at " + this->path_); - } -} - -void LibDevice::opt(llvm::LLVMContext& ctx, std::unique_ptr& llvm) { - // Add nvvm reflect flags to llvm module - // https://llvm.org/docs/LangRef.html#module-flags-metadata - // i32 4: Override the other module. - // i32 1: Emit an error - // If both modules specify Override, but the values differ, an error - // will be emitted. 
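  // (Equivalently, for illustration only — assuming LLVM's convenience overload
  //  Module::addModuleFlag(ModFlagBehavior, StringRef, uint32_t):
  //      llvm->addModuleFlag(llvm::Module::Override, "nvvm-reflect-ftz", 1);
  //  Here the constant 4 selects the Override behavior and 1 is the flag's
  //  payload, i.e. "enable the flush-to-zero code paths in libdevice"; the 1
  //  is not itself a behavior code. The explicit MDNode construction below
  //  achieves the same effect.)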
- llvm::Type* I32 = llvm::Type::getInt32Ty(ctx); - llvm::Metadata* md_four = - llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(I32, 4)); - llvm::Metadata* md_name = llvm::MDString::get(ctx, "nvvm-reflect-ftz"); - llvm::Metadata* md_one = - llvm::ConstantAsMetadata::get(llvm::ConstantInt::getSigned(I32, 1)); - llvm::MDNode* reflect = llvm::MDNode::get(ctx, {md_four, md_name, md_one}); - llvm->addModuleFlag(reflect); -} - -std::unique_ptr create_extern_lib(const std::string& lib_name, - const std::string& lib_path) { - if (lib_name == "libdevice") { - return std::make_unique(lib_name, lib_path); - } else { - throw std::runtime_error("Unknown external library: " + lib_name); - } -} - -} // namespace codegen -} // namespace triton diff --git a/lib/codegen/pass.cc b/lib/codegen/pass.cc deleted file mode 100644 index 1057cfef6e8d..000000000000 --- a/lib/codegen/pass.cc +++ /dev/null @@ -1,170 +0,0 @@ -#include "triton/codegen/pass.h" - -#include "llvm/IR/Constants.h" -#include "llvm/IR/LegacyPassManager.h" -#include "llvm/IR/Module.h" -#include "llvm/IR/Verifier.h" -#include "llvm/IRReader/IRReader.h" -#include "llvm/Linker/Linker.h" -#include "llvm/Support/SourceMgr.h" -#include "llvm/Transforms/IPO.h" -#include "llvm/Transforms/IPO/PassManagerBuilder.h" -#include "triton/codegen/analysis/align.h" -#include "triton/codegen/analysis/allocation.h" -#include "triton/codegen/analysis/axes.h" -#include "triton/codegen/analysis/liveness.h" -#include "triton/codegen/analysis/swizzle.h" -#include "triton/codegen/selection/generator.h" -#include "triton/codegen/transform/coalesce.h" -#include "triton/codegen/transform/cts.h" -#include "triton/codegen/transform/dce.h" -#include "triton/codegen/transform/disassociate.h" -#include "triton/codegen/transform/inline.h" -#include "triton/codegen/transform/membar.h" -#include "triton/codegen/transform/peephole.h" -#include "triton/codegen/transform/pipeline.h" -#include "triton/codegen/transform/prefetch.h" -#include "triton/ir/function.h" -#include "triton/ir/module.h" -#include "triton/ir/print.h" - -namespace triton { -namespace codegen { - -static void link_extern_libs(const ExternLibMap& user_extern_lib_map, - const ExternLibMap& target_extern_lib_map, - ir::module& ir, llvm::LLVMContext& ctx, - std::unique_ptr& llvm) { - for (const auto& iter : target_extern_lib_map) { - auto &lib_name = iter.first; - if (user_extern_lib_map.count(lib_name) != 0 && - user_extern_lib_map.at(lib_name)->path() != "") { - // If the user specified a path for this library, use it. - user_extern_lib_map.at(lib_name)->install(ctx, llvm); - } else { - // Otherwise, use the default path. - iter.second->install(ctx, llvm); - } - } - - std::set function_names; - for (auto& func : ir.get_function_list()) { - function_names.insert(func->get_name()); - } - llvm::legacy::PassManager pass; - pass.add(llvm::createInternalizePass([&](const llvm::GlobalValue& v) -> bool { - if (function_names.count(v.getName()) != 0) { - // Preserve global functions - return true; - } - // Internalize all device functions - return false; - })); - - llvm::legacy::PassManager pm; - pm.add(llvm::createVerifierPass()); - pm.run(*llvm); - - llvm::PassManagerBuilder builder; - builder.OptLevel = 3; - builder.SizeLevel = 0; - builder.populateModulePassManager(pass); - - pass.run(*llvm); -} - -// TODO: -// There should be a proper pass manager there! 
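// Overview of the function below: add_passes_to_emit_bin lowers a Triton
// ir::module to an llvm::Module by running the legacy analyses (align, axes,
// layouts, liveness, swizzle, allocation) interleaved with transforms
// (inline, dce, peephole, pipeline, disassociate, cts, coalesce, prefetch,
// membar), then performs instruction selection (generator::visit), records
// the shared-memory footprint in `shared_static`, and finally links external
// libraries such as libdevice if the generated code calls into them.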
-std::unique_ptr add_passes_to_emit_bin( - ir::module& ir, llvm::LLVMContext& ctx, codegen::target* target, - int num_warps, int num_stages, int& shared_static, - const ExternLibMap& extern_lib_map) { - // generate llvm code - std::string name = ir.get_function_list()[0]->get_name(); - std::unique_ptr llvm(new llvm::Module(name, ctx)); - // optimizations - bool has_sm80 = target->as_nvidia() && target->as_nvidia()->sm() >= 80; - // create passes - codegen::analysis::align align; - codegen::transform::inliner inliner; - codegen::analysis::axes axes; - codegen::transform::pipeline pipeline(has_sm80, num_stages); - codegen::transform::disassociate disassociate; - codegen::analysis::layouts layouts(&axes, &align, num_warps, target); - codegen::transform::cts cts(&layouts, has_sm80); - codegen::analysis::liveness liveness(&layouts); - codegen::analysis::swizzle swizzle(&layouts, target); - codegen::analysis::allocation allocation(&liveness); - codegen::transform::dce dce; - codegen::transform::peephole peephole(target, &layouts); - codegen::transform::coalesce coalesce(&align, &layouts, has_sm80); - codegen::transform::prefetch prefetch_s(target); - codegen::transform::membar barriers(&liveness, &layouts, &allocation, - &prefetch_s, target); - codegen::generator isel(&axes, &layouts, &align, &allocation, &swizzle, - target, num_warps); - // run passes - inliner.run(ir); - dce.run(ir); - peephole.run(ir); - dce.run(ir); - pipeline.run(ir); - dce.run(ir); - // ir.print(std::cout); - disassociate.run(ir); - dce.run(ir); - align.run(ir); - axes.run(ir); - layouts.run(ir); - peephole.run(ir); - dce.run(ir); - if (target->is_gpu()) cts.run(ir); - align.run(ir); - axes.run(ir); - layouts.run(ir); - coalesce.run(ir); - dce.run(ir); - align.run(ir); - dce.run(ir); - if (target->is_gpu()) cts.run(ir); - dce.run(ir); - align.run(ir); - axes.run(ir); - layouts.run(ir); - peephole.run(ir); - dce.run(ir); - align.run(ir); - axes.run(ir); - layouts.run(ir); - swizzle.run(ir); - // std::cout << "---" << std::endl; - // ir.print(std::cout); - // std::cout << "---" << std::endl; - // ir.print(std::cout); - liveness.run(ir); - allocation.run(ir); - prefetch_s.run(ir); - barriers.run(ir); - // exit(1); - // ir.print(std::cout); - isel.visit(ir, *llvm); - shared_static = allocation.allocated_size(); - if (target->as_nvidia() && target->as_nvidia()->sm() < 70) { - // sm < 70 (Pascal) has little shared memory resource. - // Instead of having "Error: Invalid argument" on launching a kernel, let's throw an error here. - if (shared_static >= 65536) { - throw std::runtime_error("Device does not support shared memory of " + std::to_string(shared_static) + "bytes"); - } - } - - if (isel.get_extern_lib_map().size() > 0) { - // If there's any extern lib calls, - // we need to link them in. 
- link_extern_libs(extern_lib_map, isel.get_extern_lib_map(), ir, ctx, llvm); - } - - return llvm; -} - -} // namespace codegen -} // namespace triton diff --git a/lib/codegen/selection/generator.cc b/lib/codegen/selection/generator.cc deleted file mode 100644 index 4bd0baf3467e..000000000000 --- a/lib/codegen/selection/generator.cc +++ /dev/null @@ -1,4157 +0,0 @@ -#include -#include -#include -#include -#include "triton/codegen/selection/generator.h" -#include "triton/codegen/target.h" -#include "triton/codegen/analysis/axes.h" -#include "triton/codegen/analysis/allocation.h" -#include "triton/codegen/analysis/align.h" -#include "triton/codegen/analysis/swizzle.h" -#include "triton/codegen/transform/coalesce.h" -#include "triton/ir/context.h" -#include "triton/ir/module.h" -#include "triton/ir/function.h" -#include "triton/ir/type.h" -#include "triton/ir/utils.h" -#include "llvm/IR/Module.h" -#include "llvm/IR/IRBuilder.h" -#include "llvm/IR/IntrinsicsNVPTX.h" -#include "llvm/IR/BasicBlock.h" -#include "llvm/IR/Attributes.h" -#include "llvm/IR/InlineAsm.h" -#include "llvm/Transforms/Utils/BasicBlockUtils.h" - -namespace triton{ -namespace codegen{ - -using namespace llvm; - -Value* adder::operator()(Value *x, Value *y, const std::string& name) { - // (x + cst) + y -> (x + y) + cst - if(auto* bin = dyn_cast(x)) - if(bin->getOpcode() == llvm::BinaryOperator::BinaryOps::Add) - if(dyn_cast(bin->getOperand(1))){ - return (*builder_)->CreateAdd((*builder_)->CreateAdd(bin->getOperand(0), y), - bin->getOperand(1)); - } - // (x + (y + cst)) -> (x + y) + cst - if(auto* bin = dyn_cast(y)) - if(bin->getOpcode() == llvm::BinaryOperator::BinaryOps::Add) - if(dyn_cast(bin->getOperand(1))){ - return (*builder_)->CreateAdd((*builder_)->CreateAdd(x, bin->getOperand(0)), - bin->getOperand(1)); - } - - // default - return (*builder_)->CreateAdd(x, y, name); -} - -Value* multiplier::operator()(Value *x, Value *y, const std::string &name) { - // (x + cst1) * cst2 -> (x * cst2) + (cst1 * cst2) - if(auto* bin = dyn_cast(x)) - if(bin->getOpcode() == llvm::BinaryOperator::BinaryOps::Add) - if(dyn_cast(bin->getOperand(1))) - if(dyn_cast(y)){ - return (*builder_)->CreateAdd((*builder_)->CreateMul(bin->getOperand(0), y), - (*builder_)->CreateMul(bin->getOperand(1), y)); - } - // default - return (*builder_)->CreateMul(x, y, name); -} - -Value* geper::operator()(Value *ptr, Value* off, const std::string& name){ - // (ptr + cst1) + (cst2) -> ptr + (cst1 + cst2) - if(auto* gep = dyn_cast(ptr)) - if(ConstantInt* cst1 = dyn_cast(gep->idx_begin())) - if(ConstantInt* cst2 = dyn_cast(off)){ - return (*builder_)->CreateGEP(gep->getPointerOperand()->getType()->getScalarType()->getPointerElementType(), - gep->getPointerOperand(), (*builder_)->CreateAdd(cst1, cst2)); - } - // ptr + (off + cst) -> (ptr + off) + cst - if(auto* bin = dyn_cast(off)) - if(bin->getOpcode() == llvm::BinaryOperator::BinaryOps::Add) - if(ConstantInt* cst = dyn_cast(bin->getOperand(1))){ - Value *gep = (*builder_)->CreateGEP(ptr->getType()->getScalarType()->getPointerElementType(), - ptr, bin->getOperand(0)); - return (*builder_)->CreateGEP(gep->getType()->getScalarType()->getPointerElementType(), - gep, bin->getOperand(1)); - } - // default - return (*builder_)->CreateGEP(ptr->getType()->getScalarType()->getPointerElementType(), - ptr, off, name); -} - -//Value* geper::operator()(Type *ty, Value *ptr, std::vector vals, const std::string &name) { -// return (*builder_)->CreateGEP(ty, ptr, vals, name); -//} - -// types -#define void_ty 
builder_->getVoidTy() -#define f16_ty builder_->getHalfTy() -#define bf16_ty builder_->getInt16Ty() -#define f32_ty builder_->getFloatTy() -#define i1_ty builder_->getInt1Ty() -#define i8_ty builder_->getInt8Ty() -#define i16_ty builder_->getInt16Ty() -#define i32_ty builder_->getInt32Ty() -#define i64_ty builder_->getInt64Ty() -#define vec_ty(type, num_el) VectorType::get(type, num_el, false) -#define ptr_ty(...) PointerType::get(__VA_ARGS__) -// constants -#define i16(...) builder_->getInt16(__VA_ARGS__) -#define i32(...) builder_->getInt32(__VA_ARGS__) -// ops -#define and_(...) builder_->CreateAnd(__VA_ARGS__) -#define atomic_cmp_xchg(...) builder_->CreateAtomicCmpXchg(__VA_ARGS__) -#define atomic_rmw(...) builder_->CreateAtomicRMW(__VA_ARGS__) -#define bin_op(...) builder_->CreateBinOp(__VA_ARGS__) -#define bit_cast(...) builder_->CreateBitCast(__VA_ARGS__) -#define br(...) builder_->CreateBr(__VA_ARGS__) -#define call(...) builder_->CreateCall(__VA_ARGS__) -#define cast(...) builder_->CreateCast(__VA_ARGS__) -#define cond_br(...) builder_->CreateCondBr(__VA_ARGS__) -#define exact_udiv(...) builder_->CreateExactUDiv(__VA_ARGS__) -#define extract_elt(...) builder_->CreateExtractElement(__VA_ARGS__) -#define extract_val(...) builder_->CreateExtractValue(__VA_ARGS__) -#define fadd(...) builder_->CreateFAdd(__VA_ARGS__) -#define fcmp(...) builder_->CreateFCmp(__VA_ARGS__) -#define fcmp_oge(...) builder_->CreateFCmpOGE(__VA_ARGS__) -#define fcmp_ole(...) builder_->CreateFCmpOLE(__VA_ARGS__) -#define fmul(...) builder_->CreateFMul(__VA_ARGS__) -#define fpcast(...) builder_->CreateFPCast(__VA_ARGS__) -#define fsub(...) builder_->CreateFSub(__VA_ARGS__) -#define icmp(...) builder_->CreateICmp(__VA_ARGS__) -#define icmp_eq(...) builder_->CreateICmpEQ(__VA_ARGS__) -#define icmp_sge(...) builder_->CreateICmpSGE(__VA_ARGS__) -#define icmp_sle(...) builder_->CreateICmpSLE(__VA_ARGS__) -#define icmp_uge(...) builder_->CreateICmpUGE(__VA_ARGS__) -#define icmp_ule(...) builder_->CreateICmpULE(__VA_ARGS__) -#define icmp_ult(...) builder_->CreateICmpULT(__VA_ARGS__) -#define insert_elt(...) builder_->CreateInsertElement(__VA_ARGS__) -#define intrinsic(...) builder_->CreateIntrinsic(__VA_ARGS__) -#define load(ptr) builder_->CreateLoad(ptr->getType()->getPointerElementType(), ptr) -#define lshr(...) builder_->CreateLShr(__VA_ARGS__) -#define max_num(...) builder_->CreateMaxNum(__VA_ARGS__) -#define min_num(...) builder_->CreateMinNum(__VA_ARGS__) -#define neg(...) builder_->CreateNeg(__VA_ARGS__) -#define phi(...) builder_->CreatePHI(__VA_ARGS__) -#define ret(...) builder_->CreateRet(__VA_ARGS__) -#define select(...) builder_->CreateSelect(__VA_ARGS__) -#define store(...) builder_->CreateStore(__VA_ARGS__) -#define sub(...) builder_->CreateSub(__VA_ARGS__) -#define shl(...) builder_->CreateShl(__VA_ARGS__) -#define udiv(...) builder_->CreateUDiv(__VA_ARGS__) -#define urem(...) builder_->CreateURem(__VA_ARGS__) -#define splat(...) builder_->CreateVectorSplat(__VA_ARGS__) -#define xor_(...) 
builder_->CreateXor(__VA_ARGS__) - -/** - * \brief Convert Triton-IR Type to LLVM-IR Type - */ -Type *generator::cvt(ir::type *ty) { - // struct - if(ty->is_struct_ty()){ - std::vector tys; - for(size_t i = 0; i < ty->get_struct_numel(); i++) - tys.push_back(cvt(ty->get_struct_type(i))); - return StructType::get(builder_->getContext(), tys, true); - } - - // function - if(auto* tt = dynamic_cast(ty)){ - Type *ret_ty = cvt(tt->get_return_ty()); - std::vector arg_tys(tt->get_num_params()); - for(size_t i = 0; i < arg_tys.size(); i++) - arg_tys[i] = cvt(tt->get_param_ty(i)); - return FunctionType::get(ret_ty, arg_tys, false); - } - // pointer - if(ty->is_pointer_ty()){ - Type *elt_ty = cvt(ty->get_pointer_element_ty()); - unsigned addr_space = ty->get_pointer_address_space(); - return ptr_ty(elt_ty, addr_space); - } - // integer - if(ty->is_integer_ty()){ - unsigned bitwidth = ty->get_integer_bitwidth(); - return IntegerType::get(*ctx_, bitwidth); - } - // primitive types - switch(ty->get_type_id()){ - case ir::type::VoidTyID: return Type::getVoidTy(*ctx_); - case ir::type::FP8TyID: return Type::getInt8Ty(*ctx_); - case ir::type::FP16TyID: return Type::getHalfTy(*ctx_); - case ir::type::BF16TyID: return Type::getInt16Ty(*ctx_); // use int16 as storage type - case ir::type::FP32TyID: return Type::getFloatTy(*ctx_); - case ir::type::FP64TyID: return Type::getDoubleTy(*ctx_); - case ir::type::LabelTyID: return Type::getLabelTy(*ctx_); - case ir::type::MetadataTyID: return Type::getMetadataTy(*ctx_); - case ir::type::TokenTyID: return Type::getTokenTy(*ctx_); - default: break; - } - // unknown type - throw std::runtime_error("unknown conversion from ir::type to Type"); -} - -/** - * \brief Convert Triton-IR Attribute to LLVM-IR Attribute - */ -llvm::Attribute generator::cvt(ir::attribute attr) { - switch(attr.get_kind()){ - case ir::noalias: return llvm::Attribute::get(*ctx_, llvm::Attribute::NoAlias); - case ir::readonly: return llvm::Attribute::get(*ctx_, llvm::Attribute::ReadOnly); - case ir::writeonly: return llvm::Attribute::get(*ctx_, llvm::Attribute::WriteOnly); - case ir::aligned: return llvm::Attribute::get(*ctx_, llvm::Attribute::Alignment, attr.get_value()); - case ir::retune: return llvm::Attribute::get(*ctx_, llvm::Attribute::None); - default: throw std::runtime_error("cannot convert ir::attribute_t to llvm::Attribute"); - } -} - -/** - * \brief Constructor of LLVM code generator - */ -generator::generator(analysis::axes *a_axes, - analysis::layouts *layouts, - analysis::align *alignment, - analysis::allocation *alloc, - analysis::swizzle *swizzle, - target *tgt, - unsigned num_warps) - : a_axes_(a_axes), layouts_(layouts), alignment_(alignment), alloc_(alloc), swizzle_(swizzle), - tgt_(tgt), num_warps_(num_warps), add(&builder_), mul(&builder_), gep(&builder_) { - -} - -/** - * \brief Code Generation for `value` - */ -void generator::visit_value(ir::value* v) { - if(!seen_.insert(v).second) - return; - if(v->get_type()->is_block_ty()){ - if(analysis::shared_layout* layout = layouts_->get(v)->to_shared()){ - analysis::N_buffer_info_t *n_buffer = layout->get_N_buffer(); - analysis::double_buffer_info_t *double_buffer = layout->get_double_buffer(); - - // offset - Value *offset = nullptr; - // base pointer - Value *ptr = shared_ptr_[layout]; - - if (n_buffer) { - // ptr = base (shared_ptr_[layout]) + smem_idx * size - // read_smem_idx - if (v == n_buffer->phi) { - ptr = shared_ptr_[layout]; - } - // write_smem_idx - if (std::find(n_buffer->firsts.begin(), n_buffer->firsts.end(), v) != 
n_buffer->firsts.end()) { - int write_smem_idx = /*stage_idx*/n_buffer->firsts_idx.at(v); - int elements = write_smem_idx * layout->get_per_stage_elements(); - ptr = gep(shared_pre_ptr_[layout], i32(elements)); - } else if (v == n_buffer->latch) { - Value* write_smem_idx = write_smem_idx_[layout]; - Value* elements = mul(write_smem_idx, i32(layout->get_per_stage_elements())); - ptr = gep(shared_pre_ptr_[layout], elements); - } - } else if (double_buffer) { - if(v == double_buffer->phi) - offset = shared_off_[layout]; - if(v == double_buffer->latch) - ptr = shared_next_ptr_[layout]; - else if(v == double_buffer->first) - ptr = shared_pre_ptr_[layout]; - } // else do nothing - // what visit_dot & vist_cts & ... see - shmems_[v] = ptr; - // now only latches have offset (PHINode), only used by finalize_share_layout() - shoffs_[v] = offset; - } - } - // visit operands - BasicBlock *current = builder_->GetInsertBlock(); - auto *inst = dynamic_cast(v); - if(inst) - for(ir::value *op: inst->ops()){ - if(dynamic_cast(op) || !dynamic_cast(v)) - visit_value(op); - } - init_idx(v); - // change insert point for phi node - builder_->SetInsertPoint(current); - auto *phi = dynamic_cast(v); - if(phi && !current->empty() && current->getFirstNonPHI()) - builder_->SetInsertPoint(&*current->getFirstNonPHI()); - // visit user - if(auto *usr = dynamic_cast(v)){ - if(!dynamic_cast(usr)) - usr->accept(this); - } - // revert insert point - if(phi && !current->empty() && current->getFirstNonPHI()) - builder_->SetInsertPoint(current); -} - -/** - * \brief Code Generation for `phi` - */ -void generator::visit_phi_node(ir::phi_node* x) { - Type *ty = cvt(x->get_type()->get_scalar_ty()); - for(indices_t idx: idxs_.at(x)) - vals_[x][idx] = phi(ty, x->get_num_operands()); -} - -/** - * \brief Code Generation for `call` - */ -void generator::visit_call_inst(ir::call_inst* call) { - throw std::runtime_error("call not supported! 
Triton should be inlining everything."); -} - -void generator::visit_launch_inst(ir::launch_inst *launch) { - ir::function* fn = (ir::function*)launch->get_operand(0); - // forward-declare cudaGetParameterBufferV2 - std::vector get_param_arg_tys = {PointerType::get(builder_->getInt8Ty(), 0), - ArrayType::get(builder_->getInt32Ty(), 3), - ArrayType::get(builder_->getInt32Ty(), 3), - builder_->getInt32Ty()}; - FunctionType* get_param_ty = FunctionType::get(PointerType::get(builder_->getInt8Ty(), 0), get_param_arg_tys, false); - Function* get_param_buffer = Function::Create(get_param_ty, Function::ExternalLinkage, "cudaGetParameterBufferV2", mod_); - AllocaInst* grid = builder_->CreateAlloca(get_param_arg_tys[1]); - AllocaInst* block = builder_->CreateAlloca(get_param_arg_tys[2]); - ConstantInt* _0 = builder_->getInt32(0); - ConstantInt* _1 = builder_->getInt32(1); - ConstantInt* _2 = builder_->getInt32(2); - // create basic block - BasicBlock* launch_done_bb = BasicBlock::Create(builder_->getContext(), "launch_done", builder_->GetInsertBlock()->getParent()); - BasicBlock* launch_bb = BasicBlock::Create(builder_->getContext(), "launch", launch_done_bb->getParent(), launch_done_bb); - Value *tid = tgt_->get_local_id(mod_, *builder_, 0); - Value *is_first_thread = builder_->CreateICmpEQ(tid, i32(0)); - builder_->CreateCondBr(is_first_thread, launch_bb, launch_done_bb); - builder_->SetInsertPoint(launch_bb); - - // - builder_->CreateStore(vals_[launch->get_grid()[0]][{}], builder_->CreateGEP(grid, {_0, _0})); - builder_->CreateStore(vals_[launch->get_grid()[1]][{}], builder_->CreateGEP(grid, {_0, _1})); - builder_->CreateStore(vals_[launch->get_grid()[2]][{}], builder_->CreateGEP(grid, {_0, _2})); - Value* num_warps = mul(builder_->getInt32(32), vals_[launch->get_num_warps()][{}]); - builder_->CreateStore(num_warps, builder_->CreateGEP(block, {_0, _0})); - builder_->CreateStore(builder_->getInt32(1), builder_->CreateGEP(block, {_0, _1})); - builder_->CreateStore(builder_->getInt32(1), builder_->CreateGEP(block, {_0, _2})); - Function* called_fn = fns_[fn]; - Value* callee = ConstantExpr::getCast(Instruction::BitCast, called_fn, get_param_arg_tys[0]); - Value* arg_ptr = builder_->CreateCall(get_param_buffer, {callee, builder_->CreateLoad(grid), builder_->CreateLoad(block), builder_->getInt32(0)}); - // forwrd-declare cudaLaunchDeviceV2 - std::vector launch_device_arg_tys = {get_param_ty->getReturnType(), builder_->getInt64Ty()}; - FunctionType* launch_device_ty = FunctionType::get(builder_->getInt32Ty(), launch_device_arg_tys, false); - Function* launch_device = Function::Create(launch_device_ty, Function::ExternalLinkage, "cudaLaunchDeviceV2", mod_); - // TODO: add branch - Value* do_not_launch = builder_->CreateICmpEQ(builder_->CreatePtrToInt(arg_ptr, builder_->getInt64Ty()), - builder_->getInt64(0)); - BasicBlock* launch2_bb = BasicBlock::Create(builder_->getContext(), "launch2", launch_done_bb->getParent(), launch_done_bb); - builder_->CreateCondBr(do_not_launch, launch_done_bb, launch2_bb); - builder_->SetInsertPoint(launch2_bb); - - unsigned addr_space = arg_ptr->getType()->getPointerAddressSpace(); - unsigned off = 0; - unsigned last_size = 0; - for(ir::value* arg: launch->get_values()){ - Value* curr_arg = vals_[arg][{}]; - Type* curr_arg_ty = curr_arg->getType(); - // handle struct alignment - off += last_size; - unsigned size = curr_arg_ty->isPointerTy() ? 
8 : curr_arg_ty->getPrimitiveSizeInBits() / 8; - off = (off + size - 1) / size * size; - // get pointer to current arg - Value* curr_arg_ptr = builder_->CreateGEP(arg_ptr, builder_->getInt32(off)); - curr_arg_ptr = builder_->CreateBitCast(curr_arg_ptr, curr_arg_ty->getPointerTo(addr_space)); - // store arg - builder_->CreateStore(curr_arg, curr_arg_ptr); - last_size = size; - } - builder_->CreateCall(launch_device, {arg_ptr, builder_->getInt64(0)}); - builder_->CreateBr(launch_done_bb); - // done - builder_->SetInsertPoint(launch_done_bb); - -} - -/** - * \brief Code Generation for `binary_operator` - */ -void generator::visit_binary_operator(ir::binary_operator*x) { - using ll = llvm::Instruction::BinaryOps; - using tt = ir::binary_op_t; - auto cvt = [](ir::binary_op_t op){ - switch(op) { - case tt::Add: return ll::Add; - case tt::FAdd: return ll::FAdd; - case tt::Sub: return ll::Sub; - case tt::FSub: return ll::FSub; - case tt::Mul: return ll::Mul; - case tt::FMul: return ll::FMul; - case tt::UDiv: return ll::UDiv; - case tt::SDiv: return ll::SDiv; - case tt::FDiv: return ll::FDiv; - case tt::URem: return ll::URem; - case tt::SRem: return ll::SRem; - case tt::FRem: return ll::FRem; - case tt::Shl: return ll::Shl; - case tt::LShr: return ll::LShr; - case tt::AShr: return ll::AShr; - case tt::And: return ll::And; - case tt::Or: return ll::Or; - case tt::Xor: return ll::Xor; - default: throw std::runtime_error("unreachable switch"); - } - }; -// x->print(std::cout); - for(indices_t idx: idxs_.at(x)){ - Value *lhs = vals_[x->get_operand(0)][idx]; - Value *rhs = vals_[x->get_operand(1)][idx]; - // manually select bf16 bin op - if (x->get_operand(0)->get_type()->get_scalar_ty()->is_bf16_ty()) { - assert(x->get_operand(1)->get_type()->get_scalar_ty()->is_bf16_ty()); - if (x->get_op() == tt::FAdd) { // a + b = a * 1.0 + b - InlineAsm *bf16_add_asm = - InlineAsm::get(FunctionType::get(bf16_ty, {bf16_ty, bf16_ty}, false), - "{ .reg .b16 c; \n\t" - " mov.b16 c, 0x3f80U; \n\t" // 1.0 - " fma.rn.bf16 $0, $1, c, $2; } \n\t", - "=h,h,h", false); - vals_[x][idx] = builder_->CreateCall(bf16_add_asm, {lhs, rhs}); - } else if (x->get_op() == tt::FSub) { // a - b = b * (-1.0) + a - InlineAsm *bf16_sub_asm = - InlineAsm::get(FunctionType::get(bf16_ty, {bf16_ty, bf16_ty}, false), - " { .reg .b16 c; \n\t" - " mov.b16 c, 0xbf80U; \n\t" // -1.0 - " fma.rn.bf16 $0, $2, c, $1;} \n\t", - "=h,h,h", false); - vals_[x][idx] = builder_->CreateCall(bf16_sub_asm, {lhs, rhs}); - } else if (x->get_op() == tt::FMul) { // a * b = a*b + 0 - InlineAsm *bf16_mul_asm = - InlineAsm::get(FunctionType::get(bf16_ty, {bf16_ty, bf16_ty}, false), - " { .reg .b16 c; \n\t" - " mov.b16 c, 0x8000U; \n\t" // 0.0 - " fma.rn.bf16 $0, $1, $2, c;} \n\t", - "=h,h,h", false); - vals_[x][idx] = builder_->CreateCall(bf16_mul_asm, {lhs, rhs}); - } else - throw std::runtime_error("invalid bin op for bf16"); - } else { // not bf16 - auto op = cvt(x->get_op()); - if(op == ll::Add) - vals_[x][idx] = add(lhs, rhs); - else if(op == ll::Mul) - vals_[x][idx] = mul(lhs, rhs); - else if(op == ll::FDiv && !x->get_fdiv_ieee_rounding() && - x->get_type()->get_scalar_ty()->is_fp32_ty()){ - InlineAsm *ptx = InlineAsm::get(FunctionType::get(f32_ty, {f32_ty, f32_ty}, false), - " div.full.f32 $0, $1, $2;", "=r,r,r", false); - vals_[x][idx] = builder_->CreateCall(ptx, {lhs, rhs}); - - } - else - vals_[x][idx] = bin_op(op, lhs, rhs); - } - } -} - -/** - * \brief Code Generation for `getelementptr` - */ -void generator::visit_getelementptr_inst(ir::getelementptr_inst* 
x) { - for(indices_t idx: idxs_.at(x)){ - Value *ptr = vals_[x->get_pointer_operand()][idx]; - std::vector vals; - for(auto it= x->idx_begin(); it != x->idx_end(); it++) - vals.push_back(vals_[*it][idx]); - assert(vals.size() == 1); - vals_[x][idx] = gep(ptr, vals[0]); - } -} - -/** - * \brief Code Generation for `icmp` - */ -void generator::visit_icmp_inst(ir::icmp_inst* x) { - auto cvt = [](ir::cmp_pred_t pred) { - using ll = llvm::CmpInst::Predicate; - using tt = ir::cmp_pred_t; - switch(pred){ - case tt::FIRST_ICMP_PREDICATE: return ll::FIRST_ICMP_PREDICATE; - case tt::ICMP_EQ: return ll::ICMP_EQ; - case tt::ICMP_NE: return ll::ICMP_NE; - case tt::ICMP_UGT: return ll::ICMP_UGT; - case tt::ICMP_UGE: return ll::ICMP_UGE; - case tt::ICMP_ULT: return ll::ICMP_ULT; - case tt::ICMP_ULE: return ll::ICMP_ULE; - case tt::ICMP_SGT: return ll::ICMP_SGT; - case tt::ICMP_SGE: return ll::ICMP_SGE; - case tt::ICMP_SLT: return ll::ICMP_SLT; - case tt::ICMP_SLE: return ll::ICMP_SLE; - case tt::LAST_ICMP_PREDICATE: return ll::LAST_ICMP_PREDICATE; - default: throw std::runtime_error("unreachable switch"); - } - }; - - for(indices_t idx: idxs_.at(x)){ - Value *lhs = vals_[x->get_operand(0)][idx]; - Value *rhs = vals_[x->get_operand(1)][idx]; - vals_[x][idx] = icmp(cvt(x->get_pred()), lhs, rhs); - } -} - -/** - * \brief Code Generation for `fcmp` - */ -void generator::visit_fcmp_inst(ir::fcmp_inst* x) { - auto cvt = [](ir::cmp_pred_t pred) { - using ll = llvm::CmpInst::Predicate; - using tt = ir::cmp_pred_t; - switch(pred){ - case tt::FIRST_FCMP_PREDICATE: return ll::FIRST_FCMP_PREDICATE; - case tt::FCMP_FALSE: return ll::FCMP_FALSE; - case tt::FCMP_OEQ: return ll::FCMP_OEQ; - case tt::FCMP_OGT: return ll::FCMP_OGT; - case tt::FCMP_OGE: return ll::FCMP_OGE; - case tt::FCMP_OLT: return ll::FCMP_OLT; - case tt::FCMP_OLE: return ll::FCMP_OLE; - case tt::FCMP_ONE: return ll::FCMP_ONE; - case tt::FCMP_ORD: return ll::FCMP_ORD; - case tt::FCMP_UNO: return ll::FCMP_UNO; - case tt::FCMP_UEQ: return ll::FCMP_UEQ; - case tt::FCMP_UGT: return ll::FCMP_UGT; - case tt::FCMP_UGE: return ll::FCMP_UGE; - case tt::FCMP_ULT: return ll::FCMP_ULT; - case tt::FCMP_ULE: return ll::FCMP_ULE; - case tt::FCMP_UNE: return ll::FCMP_UNE; - case tt::FCMP_TRUE: return ll::FCMP_TRUE; - case tt::LAST_FCMP_PREDICATE: return ll::LAST_FCMP_PREDICATE; - default: throw std::runtime_error("unreachable switch"); - } - }; - for(indices_t idx: idxs_.at(x)){ - Value *lhs = vals_[x->get_operand(0)][idx]; - Value *rhs = vals_[x->get_operand(1)][idx]; - vals_[x][idx] = fcmp(cvt(x->get_pred()), lhs, rhs); - } -} - - -std::tuple generator::fp32x4_to_fp8x4(Value *in0, Value *in1, Value *in2, Value *in3){ - in0 = cast(llvm::Instruction::FPTrunc, in0, f16_ty); - in1 = cast(llvm::Instruction::FPTrunc, in1, f16_ty); - in2 = cast(llvm::Instruction::FPTrunc, in2, f16_ty); - in3 = cast(llvm::Instruction::FPTrunc, in3, f16_ty); - Value *ret0, *ret1, *ret2, *ret3; - std::tie(ret0, ret1, ret2, ret3) = fp16x4_to_fp8x4(in0, in1, in2, in3); - return std::make_tuple(ret0, ret1, ret2, ret3); -} - -std::tuple generator::fp8x4_to_fp32x4(Value *in0, Value *in1, Value *in2, Value *in3){ - Value *ret0, *ret1, *ret2, *ret3; - std::tie(ret0, ret1, ret2, ret3) = fp8x4_to_fp16x4(in0, in1, in2, in3); - ret0 = cast(llvm::Instruction::FPExt, ret0, f32_ty); - ret1 = cast(llvm::Instruction::FPExt, ret1, f32_ty); - ret2 = cast(llvm::Instruction::FPExt, ret2, f32_ty); - ret3 = cast(llvm::Instruction::FPExt, ret3, f32_ty); - return std::make_tuple(ret0, ret1, ret2, ret3); -} - - 
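/* Illustrative aside (a minimal standalone sketch, not part of the deleted
 * file): the packed PTX in the fp8/fp16 conversion helpers above and below
 * boils down to the scalar bit manipulation shown here, assuming the
 * 1-sign / 4-exponent / 3-mantissa fp8 layout and the rounding rule
 * described in the comments of fp16x4_to_fp8x4. The function names are
 * hypothetical; the real helpers process four values per 32-bit register
 * using prmt/shl/shr/lop3.
 */
#include <cstdint>
#include <cstdio>

// fp16 -> fp8: keep the sign, then take the low 4 exponent bits and the top
// 3 mantissa bits, rounding to nearest by adding 1 at the 4th mantissa bit:
//   fp8 = high byte of ((fp16 & 0x8000) | (((fp16 << 1) + 0x0080) & 0x7fff))
static uint8_t fp16_bits_to_fp8(uint16_t h) {
  uint16_t packed =
      (uint16_t)((h & 0x8000u) | (((uint16_t)(h << 1) + 0x0080u) & 0x7fffu));
  return (uint8_t)(packed >> 8);
}

// fp8 -> fp16: place the fp8 byte in the high byte of a 16-bit lane, strip
// the sign, shift right by one, then restore the sign.
static uint16_t fp8_to_fp16_bits(uint8_t f) {
  uint16_t hi = (uint16_t)(f << 8);
  return (uint16_t)((hi & 0x8000u) | ((hi & 0x7f00u) >> 1));
}

int main() {
  uint16_t one = 0x3c00; // 1.0 in fp16; exactly representable in this fp8 format
  uint8_t  f8  = fp16_bits_to_fp8(one);
  std::printf("fp16 0x%04x -> fp8 0x%02x -> fp16 0x%04x\n",
              (unsigned)one, (unsigned)f8, (unsigned)fp8_to_fp16_bits(f8));
  return 0; // prints: fp16 0x3c00 -> fp8 0x78 -> fp16 0x3c00
}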
-std::tuple generator::fp8x4_to_fp16x4(Value *in0, Value *in1, Value *in2, Value *in3){ - Type *ret_ty = StructType::get(*ctx_, {vec_ty(f16_ty, 2), vec_ty(f16_ty, 2)}); - InlineAsm *ptx = InlineAsm::get(FunctionType::get(ret_ty, {i32_ty}, false), - "{" - ".reg .b32 a<2>, b<2>; \n\t" - "prmt.b32 a0, 0, $2, 0x5040; \n\t" // If input is 0xdcba set a0 to 0xb0a0 - "prmt.b32 a1, 0, $2, 0x7060; \n\t" // If input is 0xdcba set a1 to 0xd0c0 - "lop3.b32 b0, a0, 0x7fff7fff, 0, 0xc0; \n\t" // b0 = a0 & 0x7fff7fff (strip sign) - "lop3.b32 b1, a1, 0x7fff7fff, 0, 0xc0; \n\t" // b1 = a1 & 0x7fff7fff (strip sign) - "shr.b32 b0, b0, 1; \n\t" // b0 >>= 1 (shift into fp16 position) - "shr.b32 b1, b1, 1; \n\t" // b1 >>= 1 (shift into fp16 position) - "lop3.b32 $0, b0, 0x80008000, a0, 0xf8; \n\t" // out0 = b0 | (0x80008000 & a0) (restore sign) - "lop3.b32 $1, b1, 0x80008000, a1, 0xf8; \n\t" // out1 = b1 | (0x80008000 & a1) (restore sign) - "}", "=r,=r,r", false); - Value *packed_in = UndefValue::get(vec_ty(i8_ty, 4)); - packed_in = insert_elt(packed_in, in0, (uint64_t)0); - packed_in = insert_elt(packed_in, in1, (uint64_t)1); - packed_in = insert_elt(packed_in, in2, (uint64_t)2); - packed_in = insert_elt(packed_in, in3, (uint64_t)3); - Value *in = bit_cast(packed_in, i32_ty); - Value *ret = call(ptx, {in}); - Value *packed_ret0 = extract_val(ret, {0}); - Value *packed_ret1 = extract_val(ret, {1}); - Value *ret0 = extract_elt(packed_ret0, (uint64_t)0); - Value *ret1 = extract_elt(packed_ret0, (uint64_t)1); - Value *ret2 = extract_elt(packed_ret1, (uint64_t)0); - Value *ret3 = extract_elt(packed_ret1, (uint64_t)1); - return std::make_tuple(ret0, ret1, ret2, ret3); -} - -std::tuple generator::fp16x4_to_fp8x4(Value *in0, Value *in1, Value *in2, Value *in3) { - /* fp16 bit representation is seeeeemmmmmmmmmm (s=sign, e=exponent, m=mantissa) - * fp8 bit representation is seeeemmm - * The 4 fp8 exponent bits are the low order 4 exponent bits in fp16. - * The 3 fp8 mantissa bits are the high order 3 mantissa bits in fp16. - * Note that the low order exponent bits and high order mantissa bits in fp16 are contiguous. - * We want to round to nearest fp8 value. To do that add 1 to 4th mantissa bit in fp16 (that's - * one more than the number of mantissa bits in fp8). - * fp8 = (fp16 & 0x8000) | (((f16 << 1) + 0x0080) & 0x7fff) - * - * We compute two fp16s in one uint32. The addition could cause bit flips from one fp16 to the - * other. To avoid this we zero out the most significant exponent bit. If that bit is set then - * the value isn't representable in float8 anyway so we assume it's never set (and give garbage - * output if it is). If we were willing to assume the most significant exponent was never set - * we could save the first two lop3.b32 instructions below. 
- */ - InlineAsm *ptx = InlineAsm::get(FunctionType::get({vec_ty(i8_ty, 4)}, {i32_ty, i32_ty}, false), - "{" - ".reg .b32 a<2>, b<2>; \n\t" - "shl.b32 a0, $1, 1; \n\t" // a0 = input0 << 1 - "shl.b32 a1, $2, 1; \n\t" // a1 = input1 << 1 - "lop3.b32 a0, a0, 0x7fff7fff, 0, 0xc0; \n\t" // a0 = (a0 & 0x7fff7fff) - "lop3.b32 a1, a1, 0x7fff7fff, 0, 0xc0; \n\t" // a1 = (a1 & 0x7fff7fff) - "add.u32 a0, a0, 0x00800080; \n\t" // a0 += 0x00800080 - "add.u32 a1, a1, 0x00800080; \n\t" // a1 += 0x00800080 - "lop3.b32 b0, $1, 0x80008000, a0, 0xea; \n\t" // b0 = (input0 & 0x80008000) | a0 - "lop3.b32 b1, $2, 0x80008000, a1, 0xea; \n\t" // b1 = (input1 & 0x80008000) | a1 - "prmt.b32 $0, b0, b1, 0x7531; \n\t" // If b0 = 0xabcd and b1=0x0123 sets output to 0xac02 - "}", "=r,r,r", false); - Value *packed_in0 = UndefValue::get(vec_ty(f16_ty, 2)); - Value *packed_in1 = UndefValue::get(vec_ty(f16_ty, 2)); - packed_in0 = insert_elt(packed_in0, in0, (int)0); - packed_in0 = insert_elt(packed_in0, in1, (int)1); - packed_in1 = insert_elt(packed_in1, in2, (int)0); - packed_in1 = insert_elt(packed_in1, in3, (int)1); - Value *in_arg0 = bit_cast(packed_in0, i32_ty); - Value *in_arg1 = bit_cast(packed_in1, i32_ty); - Value *ret = call(ptx, {in_arg0, in_arg1}); - Value *ret0 = extract_elt(ret, (int)0); - Value *ret1 = extract_elt(ret, (int)1); - Value *ret2 = extract_elt(ret, (int)2); - Value *ret3 = extract_elt(ret, (int)3); - return std::make_tuple(ret0, ret1, ret2, ret3); -} - -std::tuple generator::fp8x4_to_bf16x4(Value *in0, Value *in1, Value *in2, Value *in3) { - // current exp offset: 15 - // Add 112 (127-15) to compensate the difference in exponent bias - // bf16 = (nosign >> (8-4) + 112 << 7) | sign; - // bf16 = (nosign >> 4 + 0x3800) | sign; - Type *ret_ty = StructType::get(*ctx_, {vec_ty(bf16_ty, 2), vec_ty(bf16_ty, 2)}); - InlineAsm *ptx = InlineAsm::get(FunctionType::get(ret_ty, {i32_ty}, false), - "{" - ".reg .b32 a<2>, sign<2>, nosign<2>, b<2>; \n\t" - "prmt.b32 a0, 0, $2, 0x5040; \n\t" // 0xdcba => 0xb0a0 - "prmt.b32 a1, 0, $2, 0x7060; \n\t" // 0xdcba => 0xd0c0 - "and.b32 sign0, a0, 0x80008000; \n\t" - "and.b32 sign1, a1, 0x80008000; \n\t" - "and.b32 nosign0, a0, 0x7fff7fff; \n\t" - "and.b32 nosign1, a1, 0x7fff7fff; \n\t" - "shr.b32 nosign0, nosign0, 4; \n\t" - "shr.b32 nosign1, nosign1, 4; \n\t" - "add.u32 nosign0, nosign0, 0x38003800; \n\t" - "add.u32 nosign1, nosign1, 0x38003800; \n\t" - "or.b32 $0, sign0, nosign0; \n\t" - "or.b32 $1, sign1, nosign1; \n\t" - "}", "=r,=r,r", false); - Value *packed_in = UndefValue::get(vec_ty(i8_ty, 4)); - packed_in = insert_elt(packed_in, in0, (uint64_t)0); - packed_in = insert_elt(packed_in, in1, (uint64_t)1); - packed_in = insert_elt(packed_in, in2, (uint64_t)2); - packed_in = insert_elt(packed_in, in3, (uint64_t)3); - Value *in = bit_cast(packed_in, i32_ty); - Value *ret = call(ptx, {in}); - Value *packed_ret0 = extract_val(ret, {0}); - Value *packed_ret1 = extract_val(ret, {1}); - Value *ret0 = extract_elt(packed_ret0, (uint64_t)0); - Value *ret1 = extract_elt(packed_ret0, (uint64_t)1); - Value *ret2 = extract_elt(packed_ret1, (uint64_t)0); - Value *ret3 = extract_elt(packed_ret1, (uint64_t)1); - return std::make_tuple(ret0, ret1, ret2, ret3); -} - -std::tuple generator::bf16x4_to_fp8x4(Value *in0, Value *in1, Value *in2, Value *in3) { - /* Assuming fp8 exponent offset is 16. bf16 exponent offset is 127. 
- Max value in fp8: 0b01111111 (0x7f), - bf16: 3ff0 - Min value in fp8: 0b00000000 (0x00) - bf16: 0x3c00 - // @note: +0x8 is for "rounding to nearest zero" - fp8 = (nosign(bf16) - (112 << 7) + 0x8) << 4; - return fp8 | sign; // also permute bytes - */ - InlineAsm *ptx = InlineAsm::get(FunctionType::get({vec_ty(i8_ty, 4)}, {i32_ty, i32_ty}, false), - "{\n\t" - ".reg .u32 sign, sign<2>, nosign, nosign<2>; \n\t" - ".reg .u32 fp8_min, fp8_max, rn_, zero; \n\t" - "mov.u32 fp8_min, 0x38003800; \n\t" - "mov.u32 fp8_max, 0x3ff03ff0; \n\t" - "mov.u32 rn_, 0x80008; \n\t" - "mov.u32 zero, 0; \n\t" - "and.b32 sign0, $1, 0x80008000; \n\t" - "and.b32 sign1, $2, 0x80008000; \n\t" - "prmt.b32 sign, sign0, sign1, 0x7531; \n\t" - "and.b32 nosign0, $1, 0x7fff7fff; \n\t" - "and.b32 nosign1, $2, 0x7fff7fff; \n\t" - - ".reg .u32 nosign_0_<2>, nosign_1_<2>; \n\t" // nosign = clamp(nosign, min, max) - "and.b32 nosign_0_0, nosign0, 0xffff0000; \n\t" - "max.u32 nosign_0_0, nosign_0_0, 0x38000000; \n\t" - "min.u32 nosign_0_0, nosign_0_0, 0x3ff00000; \n\t" - "and.b32 nosign_0_1, nosign0, 0x0000ffff; \n\t" - "max.u32 nosign_0_1, nosign_0_1, 0x3800; \n\t" - "min.u32 nosign_0_1, nosign_0_1, 0x3ff0; \n\t" - "or.b32 nosign0, nosign_0_0, nosign_0_1; \n\t" - "and.b32 nosign_1_0, nosign1, 0xffff0000; \n\t" - "max.u32 nosign_1_0, nosign_1_0, 0x38000000; \n\t" - "min.u32 nosign_1_0, nosign_1_0, 0x3ff00000; \n\t" - "and.b32 nosign_1_1, nosign1, 0x0000ffff; \n\t" - "max.u32 nosign_1_1, nosign_1_1, 0x3800; \n\t" - "min.u32 nosign_1_1, nosign_1_1, 0x3ff0; \n\t" - "or.b32 nosign1, nosign_1_0, nosign_1_1; \n\t" - - "add.u32 nosign0, nosign0, rn_; \n\t" // round to nearest zero - "add.u32 nosign1, nosign1, rn_; \n\t" - "sub.u32 nosign0, nosign0, 0x38003800; \n\t" // compensate offset - "sub.u32 nosign1, nosign1, 0x38003800; \n\t" - "shr.u32 nosign0, nosign0, 4; \n\t" - "shr.u32 nosign1, nosign1, 4; \n\t" - "prmt.b32 nosign, nosign0, nosign1, 0x6420; \n\t" - "or.b32 $0, nosign, sign; \n\t" - "" - "}", "=r,r,r", false); - Value *packed_in0 = UndefValue::get(vec_ty(bf16_ty, 2)); - Value *packed_in1 = UndefValue::get(vec_ty(bf16_ty, 2)); - packed_in0 = insert_elt(packed_in0, in0, (int)0); - packed_in0 = insert_elt(packed_in0, in1, (int)1); - packed_in1 = insert_elt(packed_in1, in2, (int)0); - packed_in1 = insert_elt(packed_in1, in3, (int)1); - Value *in_arg0 = bit_cast(packed_in0, i32_ty); - Value *in_arg1 = bit_cast(packed_in1, i32_ty); - Value *ret = call(ptx, {in_arg0, in_arg1}); - Value *ret0 = extract_elt(ret, (int)0); - Value *ret1 = extract_elt(ret, (int)1); - Value *ret2 = extract_elt(ret, (int)2); - Value *ret3 = extract_elt(ret, (int)3); - return std::make_tuple(ret0, ret1, ret2, ret3); -} - -Value* generator::bf16_to_fp32(Value *in0){ - if (tgt_->as_nvidia()->sm() >= 80) { - InlineAsm *ptx = InlineAsm::get(FunctionType::get(f32_ty, {bf16_ty}, false), - "cvt.rn.f32.bf16 $0, $1;", "=r,h", false); - return call(ptx, {in0}); - } else { - Value *ret = UndefValue::get(vec_ty(i16_ty, 2)); - ret = insert_elt(ret, bit_cast(in0, i16_ty), (uint64_t)1); - ret = insert_elt(ret, bit_cast(builder_->getInt16(0), i16_ty), (uint64_t)0); - return bit_cast(ret, f32_ty); - } -} - -Value* generator::fp32_to_bf16(Value *in0){ - if(tgt_->as_nvidia()->sm() >= 80){ - InlineAsm *ptx = InlineAsm::get(FunctionType::get(bf16_ty, {f32_ty}, false), - "cvt.rn.bf16.f32 $0, $1;", "=h,r", false); - return call(ptx, {in0}); - } - return extract_elt(bit_cast(in0, vec_ty(i16_ty, 2)), (uint64_t)1); -} - -/** - * \brief Code Generation for `cast` - */ -void 
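// The two helpers above rely on bf16 being the upper 16 bits of an f32 bit
// pattern. A reference-only scalar sketch (hypothetical helpers, plain C++;
// the downcast truncates exactly like the pre-sm_80 fallback, whereas
// cvt.rn.bf16.f32 rounds to nearest):
#include <cstring>
static float bf16_bits_to_f32(unsigned short b) {
  unsigned u = (unsigned)b << 16;      // bf16 occupies the high half of the f32 pattern
  float f;
  std::memcpy(&f, &u, sizeof(f));      // bit-cast, like the <2 x i16> -> f32 cast above
  return f;
}
static unsigned short f32_to_bf16_bits(float f) {
  unsigned u;
  std::memcpy(&u, &f, sizeof(u));
  return (unsigned short)(u >> 16);    // keep element 1 of the <2 x i16> view (truncation)
}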
generator::visit_cast_inst(ir::cast_inst* x) { - ir::value *op = x->get_operand(0); - ir::type* ret_sca_ty = x->get_type()->get_scalar_ty(); - ir::type* op_sca_ty = op->get_type()->get_scalar_ty(); - auto x_idxs = idxs_.at(x); - auto op_idxs = idxs_.at(op); - - // <> FP8 - if(ret_sca_ty->is_fp8_ty() || op_sca_ty->is_fp8_ty()){ - // ensure that conversions can be vectorized - int ld = layouts_->get(x)->get_order(0); - int contiguous = layouts_->get(x)->to_scanline()->nts(ld); - if(contiguous % 4 != 0) - throw std::runtime_error("unsupported fp32 -> fp8 conversion"); - - // run the conversion - auto cvt = [&](Value* a, Value* b, Value* c, Value* d){ - if(op_sca_ty->is_fp32_ty() && ret_sca_ty->is_fp8_ty()) - return fp32x4_to_fp8x4(a, b, c, d); - if(op_sca_ty->is_fp16_ty() && ret_sca_ty->is_fp8_ty()) - return fp16x4_to_fp8x4(a, b, c, d); - if(op_sca_ty->is_fp8_ty() && ret_sca_ty->is_fp16_ty()) - return fp8x4_to_fp16x4(a, b, c, d); - if(op_sca_ty->is_fp8_ty() && ret_sca_ty->is_fp32_ty()) - return fp8x4_to_fp32x4(a, b, c, d); - // fp8 <> bf16 - if(op_sca_ty->is_fp8_ty() && ret_sca_ty->is_bf16_ty()) - return fp8x4_to_bf16x4(a, b, c, d); - if (op_sca_ty->is_bf16_ty() && ret_sca_ty->is_fp8_ty()) - return bf16x4_to_fp8x4(a, b, c, d); - throw std::runtime_error("unsupported conversion"); - }; - for(size_t i = 0; i < x_idxs.size(); i+=4){ - std::tie(vals_[x][x_idxs[i+0]], - vals_[x][x_idxs[i+1]], - vals_[x][x_idxs[i+2]], - vals_[x][x_idxs[i+3]]) = cvt(vals_[op][op_idxs[i+0]], - vals_[op][op_idxs[i+1]], - vals_[op][op_idxs[i+2]], - vals_[op][op_idxs[i+3]]); - } - return; - } - - // <> BF16 - if(ret_sca_ty->is_bf16_ty() || op_sca_ty->is_bf16_ty()){ - // FP32 -> BF16 - if(op_sca_ty->is_fp32_ty()){ - for (indices_t idx: idxs_.at(x)) { - Value *arg = vals_[x->get_operand(0)][idx]; - vals_[x][idx] = fp32_to_bf16(arg); // cast(cvt(x->get_op()), arg, ty); - } - return; - } - // BF16 -> FP32 - if(ret_sca_ty->is_fp32_ty()){ - for(size_t i = 0; i < x_idxs.size(); i++) - vals_[x][x_idxs[i + 0]] = bf16_to_fp32(vals_[op][op_idxs[i + 0]]); - return; - } - } - - - Type *ty = cvt(x->get_type()->get_scalar_ty()); - auto cvt = [](ir::cast_op_t op){ - using ll = llvm::Instruction::CastOps; - using tt = ir::cast_op_t; - switch(op){ - case tt::Trunc: return ll::Trunc; - case tt::ZExt: return ll::ZExt; - case tt::SExt: return ll::SExt; - case tt::FPTrunc: return ll::FPTrunc; - case tt::FPExt: return ll::FPExt; - case tt::UIToFP: return ll::UIToFP; - case tt::SIToFP: return ll::SIToFP; - case tt::FPToUI: return ll::FPToUI; - case tt::FPToSI: return ll::FPToSI; - case tt::PtrToInt: return ll::PtrToInt; - case tt::IntToPtr: return ll::IntToPtr; - case tt::BitCast: return ll::BitCast; - case tt::AddrSpaceCast: return ll::AddrSpaceCast; - default: throw std::runtime_error("unreachable switch"); - } - }; - for(indices_t idx: idxs_.at(x)){ - Value *arg = vals_[x->get_operand(0)][idx]; - vals_[x][idx] = cast(cvt(x->get_op()), arg, ty); - } -} - -std::tuple generator::int16_to_float16x8( - Value *in0, Value *scale_x512, Value *shift -){ - /* unpacking 8 int2s packed into an int16 to 8 float16s - * the algorithm is similar to - * https://github.com/pytorch/FBGEMM/blob/6a59bb6621ba9ec7d650ccb78b78ea24d62a3904/ - fbgemm_gpu/include/fbgemm_gpu/fbgemm_cuda_utils.cuh#L1492-L1563 - */ - Type *ret_ty = StructType::get(*ctx_, {vec_ty(f16_ty, 2), vec_ty(f16_ty, 2), vec_ty(f16_ty, 2), vec_ty(f16_ty, 2)}); - InlineAsm *ptx = InlineAsm::get(FunctionType::get(ret_ty, {i32_ty, i32_ty, i32_ty}, false), - "{" - ".reg .b32 a<2>, b<4>; \n\t" // input 
is 0xab,cd,ef,gh,ab,cd,ef,gh, each a, b etc occupies two bits. - "and.b32 a0, 0x30300303, $4; \n\t" // set a0 to 0x0b,00,0f,00,00,0d,00,0h - "and.b32 a1, 0xc0c00c0c, $4; \n\t" // set a1 to 0xa0,00,e0,00,00,c0,00,g0 - "prmt.b32 b0, 0, a0, 0x0504; \n\t" // set b0 to 0x00,00,00,0d,00,00,00,0h - "prmt.b32 b1, 0, a1, 0x0504; \n\t" // set b1 to 0x00,00,00,c0,00,00,00,g0 - "prmt.b32 b2, 0, a0, 0x0706; \n\t" // set b2 to 0x00,00,0b,00,00,00,0f,00 - "prmt.b32 b3, 0, a1, 0x0706; \n\t" // set b3 to 0x00,00,a0,00,00,00,e0,00 - "mov.b32 a0, 0x78007800; \n\t" // a0 = 32768 - "mov.b32 a1, 0x70007000; \n\t" // a1 = 8192 - "mul.f16x2 b0, b0, a0; \n\t" // b0 = b0 * 32768. - "mul.f16x2 b1, b1, a1; \n\t" // b1 = b1 * 8192. - "mov.b32 a0, 0x68006800; \n\t" // a0 = 2048 - "mov.b32 a1, 0x60006000; \n\t" // a1 = 512 - "mul.f16x2 b2, b2, a0; \n\t" // b2 = b2 * 2048. - "mul.f16x2 b3, b3, a1; \n\t" // b3 = b3 * 512. - "fma.rn.f16x2 $0, b0, $5, $6; \n\t" // out0 = b0 * scale + shift. - "fma.rn.f16x2 $1, b1, $5, $6; \n\t" // out1 = b1 * scale + shift. - "fma.rn.f16x2 $2, b2, $5, $6; \n\t" // out2 = b2 * scale + shift. - "fma.rn.f16x2 $3, b3, $5, $6; \n\t" // out3 = b3 * scale + shift. - "}", "=r,=r,=r,=r,r,r,r", false); - - Value *packed_in = UndefValue::get(vec_ty(i16_ty, 2)); - packed_in = insert_elt(packed_in, in0, (int)0); - packed_in = insert_elt(packed_in, in0, (int)1); - Value *in = bit_cast(packed_in, i32_ty); - - Value *ret = call(ptx, {in, scale_x512, shift}); - Value *packed_ret0 = extract_val(ret, {0}); - Value *packed_ret1 = extract_val(ret, {1}); - Value *packed_ret2 = extract_val(ret, {2}); - Value *packed_ret3 = extract_val(ret, {3}); - Value *ret0 = extract_elt(packed_ret0, (uint64_t)0); // h - Value *ret1 = extract_elt(packed_ret1, (uint64_t)0); // g - Value *ret2 = extract_elt(packed_ret2, (uint64_t)0); // f - Value *ret3 = extract_elt(packed_ret3, (uint64_t)0); // e - Value *ret4 = extract_elt(packed_ret0, (uint64_t)1); // d - Value *ret5 = extract_elt(packed_ret1, (uint64_t)1); // c - Value *ret6 = extract_elt(packed_ret2, (uint64_t)1); // b - Value *ret7 = extract_elt(packed_ret3, (uint64_t)1); // a - return std::make_tuple(ret0, ret1, ret2, ret3, ret4, ret5, ret6, ret7); -} - -std::tuple generator::int32_to_float16x8( - Value *in0, Value *scale_x512, Value *shift -){ - /* unpacking 8 int4s packed into an int32 to 8 float16s - * the algorithm is similar to - * https://github.com/pytorch/FBGEMM/blob/6a59bb6621ba9ec7d650ccb78b78ea24d62a3904/ - fbgemm_gpu/include/fbgemm_gpu/fbgemm_cuda_utils.cuh#L1566-L1619 - */ - Type *ret_ty = StructType::get(*ctx_, {vec_ty(f16_ty, 2), vec_ty(f16_ty, 2), vec_ty(f16_ty, 2), vec_ty(f16_ty, 2)}); - InlineAsm *ptx = InlineAsm::get(FunctionType::get(ret_ty, {i32_ty, i32_ty, i32_ty}, false), - "{" - ".reg .b32 a<2>, b<4>; \n\t" - "and.b32 a0, 0x0f0f0f0f, $4; \n\t" // If input is 0xabcdefgh set a to 0x0b0d0f0h - "and.b32 a1, 0xf0f0f0f0, $4; \n\t" // If input is 0xabcdefgh set a to 0xa0c0e0g0 - "prmt.b32 b0, 0, a0, 0x0504; \n\t" // set b0 to 0x000f000h - "prmt.b32 b1, 0, a1, 0x0504; \n\t" // set b1 to 0x00e000g0 - "prmt.b32 b2, 0, a0, 0x0706; \n\t" // set b2 to 0x000b000d - "prmt.b32 b3, 0, a1, 0x0706; \n\t" // set b3 to 0x00a000c0 - "mov.b32 a0, 0x78007800; \n\t" - "mov.b32 a1, 0x68006800; \n\t" - "mul.f16x2 b0, b0, a0; \n\t" // b0 = b0 * 32768. - "mul.f16x2 b1, b1, a1; \n\t" // b1 = b1 * 2048. - "mul.f16x2 b2, b2, a0; \n\t" // b2 = b2 * 32768. - "mul.f16x2 b3, b3, a1; \n\t" // b3 = b3 * 2048. - "fma.rn.f16x2 $0, b0, $5, $6; \n\t" // out0 = b0 * scale + shift. 
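// The prmt.b32 selectors used in the conversions above can be read with this
// reference-only sketch (hypothetical helper; assumes the default mode, i.e. no
// sign replication). Each hex digit of `sel` picks one byte out of the 8-byte
// pool {b, a}; e.g. prmt_b32(0, 0xabcdefgh, 0x0504) yields 0x00ef00gh.
static unsigned prmt_b32(unsigned a, unsigned b, unsigned sel) {
  unsigned long long pool = ((unsigned long long)b << 32) | a; // bytes 0-3 from a, 4-7 from b
  unsigned out = 0;
  for (int i = 0; i < 4; ++i) {
    unsigned idx = (sel >> (4 * i)) & 0x7;                     // selector digit for result byte i
    out |= (unsigned)((pool >> (8 * idx)) & 0xff) << (8 * i);
  }
  return out;
}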
- "fma.rn.f16x2 $1, b1, $5, $6; \n\t" // out1 = b1 * scale + shift. - "fma.rn.f16x2 $2, b2, $5, $6; \n\t" // out0 = b0 * scale + shift. - "fma.rn.f16x2 $3, b3, $5, $6; \n\t" // out1 = b1 * scale + shift. - "}", "=r,=r,=r,=r,r,r,r", false); - - Value *ret = call(ptx, {in0, scale_x512, shift}); - Value *packed_ret0 = extract_val(ret, {0}); - Value *packed_ret1 = extract_val(ret, {1}); - Value *packed_ret2 = extract_val(ret, {2}); - Value *packed_ret3 = extract_val(ret, {3}); - Value *ret0 = extract_elt(packed_ret0, (uint64_t)0); // h - Value *ret1 = extract_elt(packed_ret1, (uint64_t)0); // g - Value *ret2 = extract_elt(packed_ret0, (uint64_t)1); // f - Value *ret3 = extract_elt(packed_ret1, (uint64_t)1); // e - Value *ret4 = extract_elt(packed_ret2, (uint64_t)0); // d - Value *ret5 = extract_elt(packed_ret3, (uint64_t)0); // c - Value *ret6 = extract_elt(packed_ret2, (uint64_t)1); // b - Value *ret7 = extract_elt(packed_ret3, (uint64_t)1); // a - return std::make_tuple(ret0, ret1, ret2, ret3, ret4, ret5, ret6, ret7); -} - -std::tuple generator::int32_to_float16x4(Value *in0, Value *scale_x512, Value *shift){ - /* unpacking 4 int8s packed into an int32 to 4 fp16s - * the algorithm is similar to - * https://github.com/pytorch/FBGEMM/blob/6a59bb6621ba9ec7d650ccb78b78ea24d62a3904/ - fbgemm_gpu/include/fbgemm_gpu/fbgemm_cuda_utils.cuh#L1622-L1646 - */ - Type *ret_ty = StructType::get(*ctx_, {vec_ty(f16_ty, 2), vec_ty(f16_ty, 2)}); - InlineAsm *ptx = InlineAsm::get(FunctionType::get(ret_ty, {i32_ty, i32_ty, i32_ty}, false), - "{" - ".reg .b32 a, b<2>; \n\t" - "prmt.b32 b0, 0, $2, 0x0504; \n\t" // If input is 0xabcdefgh set b0 to 0x00ef00gh - "prmt.b32 b1, 0, $2, 0x0706; \n\t" // If input is 0xabcdefgh set b1 to 0x00ab00cd - "mov.b32 a, 0x78007800; \n\t" - "mul.f16x2 b0, b0, a; \n\t" // b0 = b0 * 32768. - "mul.f16x2 b1, b1, a; \n\t" // b1 = b1 * 32768. - "fma.rn.f16x2 $0, b0, $3, $4; \n\t" // out0 = b0 * scale + shift. - "fma.rn.f16x2 $1, b1, $3, $4; \n\t" // out1 = b1 * scale + shift. 
- "}", "=r,=r,r,r,r", false); - - Value *ret = call(ptx, {in0, scale_x512, shift}); - Value *packed_ret0 = extract_val(ret, {0}); - Value *packed_ret1 = extract_val(ret, {1}); - Value *ret0 = extract_elt(packed_ret0, (uint64_t)0); // gh - Value *ret1 = extract_elt(packed_ret0, (uint64_t)1); // ef - Value *ret2 = extract_elt(packed_ret1, (uint64_t)0); // cd - Value *ret3 = extract_elt(packed_ret1, (uint64_t)1); // ab - return std::make_tuple(ret0, ret1, ret2, ret3); -} - -std::tuple generator::prepare_scale_shift(Value *scale, Value *shift){ - Value *scale_x512 = fmul(scale, bit_cast(i16(0x6000), f16_ty)); - Value *p_scale_x512 = UndefValue::get(vec_ty(f16_ty, 2)); - p_scale_x512 = insert_elt(p_scale_x512, scale_x512, (int)0); - p_scale_x512 = insert_elt(p_scale_x512, scale_x512, (int)1); - p_scale_x512 = bit_cast(p_scale_x512, i32_ty); - - Value *p_shift = UndefValue::get(vec_ty(f16_ty, 2)); - p_shift = insert_elt(p_shift, shift, (int)0); - p_shift = insert_elt(p_shift, shift, (int)1); - p_shift = bit_cast(p_shift, i32_ty); - - return std::make_tuple(p_scale_x512, p_shift); -} - -/** - * \brief Code Generation for `dequantize` - */ -void generator::visit_dequantize_inst(ir::dequantize_inst* x) { - ir::value *op = x->get_operand(0); - - auto src_ty_size_in_bits = op->get_type()->get_scalar_ty()->get_primitive_size_in_bits(); - - auto ret_last_dim = (x->get_type()->get_block_shapes()).back(); - auto op_last_dim = (op->get_type()->get_block_shapes()).back(); - - auto x_idxs = idxs_.at(x); - auto op_idxs = idxs_.at(op); - - ir::value *scale = x->get_operand(1); - ir::value *shift = x->get_operand(2); - - Value *p_scale_x512, *p_shift; - std::tie(p_scale_x512, p_shift) = prepare_scale_shift(vals_[scale][{}], vals_[shift][{}]); - - int ld = layouts_->get(x)->get_order(0); - int contiguous = layouts_->get(x)->to_scanline()->nts(ld); - - int op_ld = layouts_->get(op)->get_order(0); - int op_contiguous = layouts_->get(op)->to_scanline()->nts(op_ld); - - std::string err_msg; - err_msg = "unsupported dequantization, cannot vectorize properly. x_idxs.size(): " - + std::to_string(x_idxs.size()) + "; op_idxs.size(): " - + std::to_string(op_idxs.size()) + "; contiguous: " - + std::to_string(contiguous) + "; op_contiguous: " - + std::to_string(op_contiguous) + ". 
if the condition " - "is not met, please try adjusting block_size, num_warps or " - "using tl.multiple_of to hint the input/output ptr address."; - - if (ret_last_dim == 8 * op_last_dim) { - if((x_idxs.size() != 8 * op_idxs.size()) || (contiguous != 8 * op_contiguous)) { - throw std::runtime_error(err_msg); - } - - auto cvt = [&]( - Value* a, Value* scale, Value* shift - ){ - if (src_ty_size_in_bits == 16){ // int2 quantization, int16 to 8 fp16s - return int16_to_float16x8(a, scale, shift); - } else if (src_ty_size_in_bits == 32) { // int4 quantization, int32 to 8 fp16s - return int32_to_float16x8(a, scale, shift); - } else { - throw std::runtime_error("unsupported conversion"); - } - }; - - for(size_t j = 0; j < op_idxs.size(); j++){ - size_t i = j * 8; - std::tie(vals_[x][x_idxs[i+0]], - vals_[x][x_idxs[i+1]], - vals_[x][x_idxs[i+2]], - vals_[x][x_idxs[i+3]], - vals_[x][x_idxs[i+4]], - vals_[x][x_idxs[i+5]], - vals_[x][x_idxs[i+6]], - vals_[x][x_idxs[i+7]]) = cvt(vals_[op][op_idxs[j]], p_scale_x512, p_shift); - } - } else if (ret_last_dim == 4 * op_last_dim && src_ty_size_in_bits == 32) { // int8 quantization, int32 to 4 fp16s - if((x_idxs.size() != 4 * op_idxs.size()) || (contiguous != 4 * op_contiguous)) { - throw std::runtime_error(err_msg); - } - - auto cvt = [&](Value* a, Value* scale, Value* shift){ - return int32_to_float16x4(a, scale, shift); - }; - - for(size_t j = 0; j < op_idxs.size(); j++){ - size_t i = j * 4; - std::tie(vals_[x][x_idxs[i+0]], - vals_[x][x_idxs[i+1]], - vals_[x][x_idxs[i+2]], - vals_[x][x_idxs[i+3]]) = cvt(vals_[op][op_idxs[j]], p_scale_x512, p_shift); - } - } else { - throw std::runtime_error("unsupported dequantization"); - } - return; -} - -/** - * \brief Code Generation for `return` - */ -void generator::visit_return_inst(ir::return_inst* rr) { - ir::value *ret_val = rr->get_return_value(); - ret(ret_val ? 
vals_[ret_val][{}] : nullptr); -} - -/** - * \brief Code Generation for `cond_branch` - */ -void generator::visit_cond_branch_inst(ir::cond_branch_inst* br) { - BasicBlock *true_dest = bbs_.at(br->get_true_dest()); - BasicBlock *false_dest = bbs_.at(br->get_false_dest()); - Value *cond = vals_[br->get_cond()][{}]; - cond_br(cond, true_dest, false_dest); -} - -/** - * \brief Code Generation for `uncond_branch` - */ -void generator::visit_uncond_branch_inst(ir::uncond_branch_inst* br) { - BasicBlock *dest = bbs_.at(br->get_dest()); - br(dest); -} - -/** - * \brief Code Generation for a (synchronous) `load` - */ -void generator::visit_load_inst(ir::load_inst* x){ - BasicBlock *current = builder_->GetInsertBlock(); - Module *module = current->getModule(); - Value *tid = tgt_->get_local_id(module, *builder_, 0); - Value *lane = urem(tid, i32(32)); - ir::value *op = x->get_pointer_operand(); - ir::masked_load_inst *mx = dynamic_cast(x); - Type* ty = cvt(op->get_type()->get_scalar_ty()->get_pointer_element_ty()); - // compute vector width - size_t vec = 1; - bool is_mma_first_row = false; - if(op->get_type()->is_block_ty()){ - auto ord = ords_.at(op); - size_t aln = alignment_->get(op, ord[0]); - if(mx){ - size_t max_eq = alignment_->get_cst_info(mx->get_mask_operand())[ord[0]].num_cst; - max_eq = std::max(max_eq, 1); - aln = std::min(aln, max_eq); - } - analysis::distributed_layout* layout = dynamic_cast(layouts_->get(x)); - assert(layout); - - vec = std::min(layout->contig_per_thread(ord[0]), aln); - // TODO: generalize - is_mma_first_row = (ord.size() >= 1) && layout->to_mma() && - (a_axes_->get(x, ord[0]) == layouts_->get(x)->get_axis(1)); - if(is_mma_first_row) - vec = std::min(2, aln); - } - // code generation - auto idxs = idxs_.at(x); - for(size_t i = 0; i < idxs.size(); i += vec){ - indices_t idx = idxs[i]; - // pointer value - Value *ptr = vals_[op][idx]; - // masked load - size_t dtsize = x->get_type()->get_scalar_ty()->get_primitive_size_in_bits() / 8; - // input ptr info - GetElementPtrInst *in_gep = dyn_cast(ptr); - size_t in_off; - if(in_gep){ - ConstantInt* cst = dyn_cast(in_gep->idx_begin()); - in_off = cst ? cst->getValue().getSExtValue()*dtsize : 0; - ptr = cst ? in_gep->getPointerOperand() : in_gep; - } - else{ - in_off = 0; - } - Value *pred = mx ? vals_[mx->get_mask_operand()][idx] : builder_->getTrue(); - // if(!op->get_type()->is_block_ty()){ - // pred = builder_->CreateAnd(pred, icmp_eq(tid, i32(0))); - // } - Value *other = mx ? 
vals_[mx->get_false_value_operand()][idx] : nullptr; - size_t nbits = dtsize*8; - // pack sub-words (< 32/64bits) into words - // each load has width min(nbits*vec, 32/64) - // and there are (nbits * vec)/width of them - int max_word_width = std::max(32, nbits); - int tot_width = nbits*vec; - int width = std::min(tot_width, max_word_width); - int n_words = std::max(1, tot_width / width); - bool has_l2_evict_policy = (x->get_eviction_policy() != ir::load_inst::NORMAL) && tgt_->as_nvidia()->sm() >= 80; - has_l2_evict_policy = false; - // has_evict_policy = false; // currently disable until supported in `store` - // ----- - // create inline asm string - // ----- - std::ostringstream asm_oss; - asm_oss << "@$" << n_words; // predicate - asm_oss << " ld"; - if(x->get_is_volatile()) - asm_oss << ".volatile"; - asm_oss << ".global"; - if (x->get_cache_modifier() == ir::load_inst::CA) asm_oss << ".ca"; - if (x->get_cache_modifier() == ir::load_inst::CG) asm_oss << ".cg"; - if (x->get_eviction_policy() == ir::load_inst::EVICT_FIRST) asm_oss << ".L1::evict_first"; - if (x->get_eviction_policy() == ir::load_inst::EVICT_LAST) asm_oss << ".L1::evict_last"; - if (has_l2_evict_policy) asm_oss << ".L2::cache_hint"; - if(n_words > 1) - asm_oss << ".v" << n_words; // vector width - asm_oss << ".b" << width; // word size - asm_oss << " {"; - for(int i = 0; i < n_words; i++){ // return values - if(i > 0) asm_oss << ","; - asm_oss << "$" << i; - } - asm_oss << "}"; - asm_oss << ", [ $" << n_words + 1; // load - asm_oss << " + " << in_off << "]"; // constant offset - if (has_l2_evict_policy) asm_oss << ", $" << n_words + 2; - asm_oss << ";"; - bool has_other = other && (other != UndefValue::get(other->getType())); - std::vector others; - // handle `other` values for indices where the mask - // is false - if(has_other) - for(size_t ii = 0; ii < n_words; ii++){ - size_t size = width / nbits; - Value *v = UndefValue::get(vec_ty(ty, size)); - for(size_t s = 0; s < size; s++){ - ir::value *false_val = mx->get_false_value_operand(); - v = insert_elt(v, vals_[false_val][idxs[i + ii*size + s]], s); - } - v = bit_cast(v, IntegerType::get(*ctx_, width)); - // PTX doesn't support mov.u8, so we need to use mov.u16 - auto mov_width = width < 16 ? 16 : width; - asm_oss << "\n "; - asm_oss << "@!$" << n_words << " mov.u" << mov_width; - asm_oss << " $" << ii << ", "; - std::ios_base::fmtflags flags(asm_oss.flags()); - if(ConstantInt* cst = dyn_cast(v)) - asm_oss << "0x" << std::hex << cst->getSExtValue(); - else{ - asm_oss << "$" << n_words + has_l2_evict_policy + 2 + ii; - others.push_back(v); - } - asm_oss.flags(flags); - asm_oss << ";"; - } - // ---- - // create inline ASM signature - // --- - std::vector ret_tys(n_words, IntegerType::get(*ctx_, width)); - Type* ret_ty = ret_tys.size() > 1 ? StructType::get(*ctx_, ret_tys) : ret_tys[0]; - // ret_ty->print(llvm::outs()); - std::vector arg_tys = {pred->getType(), ptr->getType()}; - for(Value *v: others) - arg_tys.push_back(v->getType()); - if (has_l2_evict_policy) - arg_tys.push_back(i64_ty); - FunctionType *asm_ty = FunctionType::get(ret_ty, arg_tys, false); - // --- - // create inline ASM constraints - // --- - std::string asm_cstrt; - for(int ii = 0; ii < n_words; ii++){ - if(ii > 0) asm_cstrt += ","; - asm_cstrt += (width == 64) ? "=l" : ((width == 32) ? "=r" : "=c"); - } - asm_cstrt += ",b,l"; - for(size_t ii = 0; ii < others.size(); ii++){ - asm_cstrt += ","; - asm_cstrt += (width == 64) ? "l" : ((width == 32) ? 
"r" : "c"); - } - if (has_l2_evict_policy) - asm_cstrt += ",l"; - // --- - // finally call inline ASM - // --- - InlineAsm *inlineAsm = InlineAsm::get(asm_ty, asm_oss.str(), asm_cstrt, true); - std::vector args = {pred, ptr}; - for(Value *v: others) - args.push_back(v); - if (has_l2_evict_policy) - args.push_back(policies_.at(x->get_eviction_policy())); - - - Value *_ret = call(inlineAsm, args); - // if(!op->get_type()->is_block_ty()){ - // Value* cond = icmp_eq(tid, i32(0)); - // Value* shptr = bit_cast(shmem_, ptr_ty(_ret->getType(), 3)); - // Instruction* bar = add_barrier(); - // Instruction *term = llvm::SplitBlockAndInsertIfThen(cond, bar, false); - // builder_->SetInsertPoint(term); - // store(_ret, shptr); - // builder_->SetInsertPoint(bar->getParent()); - // _ret = load(shptr); - // add_barrier(); - // } - - // --- - // extract and store return values - // --- - std::vector rets; - for(unsigned int ii = 0; ii < n_words; ii++){ - Value *curr; - if(ret_ty->isStructTy()) - curr = extract_val(_ret, {ii}); - else - curr = _ret; - rets.push_back(bit_cast(curr, vec_ty(ty, width / (dtsize*8)))); - } - int tmp = (width / (dtsize * 8)); - for(size_t ii = 0; ii < vec; ii++) - vals_[x][idxs[i+ii]] = extract_elt(rets[ii/tmp], ii % tmp); - } -} - -void generator::visit_unmasked_load_inst(ir::unmasked_load_inst* x) { - visit_load_inst(x); -} -void generator::visit_masked_load_inst(ir::masked_load_inst* x) { - visit_load_inst(x); -} - -/** - * \brief Code Generation for a (synchronous) `store` - */ - -void generator::visit_store_inst(ir::store_inst * x){ - ir::masked_store_inst *mx = dynamic_cast(x); - // operands - ir::value *ptr_op = x->get_pointer_operand(); - ir::value *val_op = x->get_value_operand(); - ir::value *msk_op = nullptr; - if(auto* msk_st = dynamic_cast(x)) - msk_op = msk_st->get_mask_operand(); - // vector size - size_t vec = 1; - if(val_op->get_type()->is_block_ty()){ - auto ord = ords_.at(x->get_pointer_operand()); - size_t aln = alignment_->get(ptr_op, ord[0]); - size_t nts = axes_.at(a_axes_->get(x->get_pointer_operand(), ord[0])).contiguous; - if(mx){ - size_t max_eq = alignment_->get_cst_info(mx->get_mask_operand())[ord[0]].num_cst; - max_eq = std::max(max_eq, 1); - aln = std::min(aln, max_eq); - } - analysis::distributed_layout* layout = dynamic_cast(layouts_->get(ptr_op)); - assert(layout); - // vec = std::min(nts, aln); - vec = std::min(layout->contig_per_thread(ord[0]), aln); - // TODO: generalize - bool is_mma_first_row = (ord.size() >= 1) && layout->to_mma() && - (a_axes_->get(ptr_op, ord[0]) == layouts_->get(ptr_op)->get_axis(1)); - if(is_mma_first_row) - vec = std::min(2, aln); - } - bool has_l2_evict_policy = (x->get_eviction_policy() != ir::load_inst::NORMAL) && tgt_->as_nvidia()->sm() >= 80; - has_l2_evict_policy = false; - auto idxs = idxs_.at(val_op); - Type *ty = cvt(val_op->get_type()->get_scalar_ty()); - if(ty->isIntegerTy(1)) - ty = builder_->getInt8Ty(); - for(size_t i = 0; i < idxs.size(); i += vec){ - indices_t idx = idxs[i]; - // pointers - Value *ptr = vals_[ptr_op][idx]; - size_t dtsize = std::max(1, val_op->get_type()->get_scalar_ty()->get_primitive_size_in_bits() / 8); - GetElementPtrInst *in_gep = dyn_cast(ptr); - size_t in_off; - if(in_gep){ - ConstantInt* cst = dyn_cast(in_gep->idx_begin()); - in_off = cst ? cst->getValue().getSExtValue()*dtsize : 0; - ptr = cst ? in_gep->getPointerOperand() : in_gep; - } - else{ - in_off = 0; - } - // mask - Value *pred = msk_op ? 
vals_[msk_op][idx] : builder_->getTrue(); - size_t nbits = dtsize*8; - // pack sub-words (< 32/64bits) into words - // each load has width min(nbits*vec, 32/64) - // and there are (nbits * vec)/width of them - int max_word_width = std::max(32, nbits); - int tot_width = nbits*vec; - int width = std::min(tot_width, max_word_width); - int n_words = std::max(1, tot_width / width); - // ----- - // create inline asm string - // ----- - std::ostringstream asm_oss; - asm_oss << "@$0"; // predicate - asm_oss << " st.global"; - if (has_l2_evict_policy) asm_oss << ".L2::cache_hint"; - if(n_words > 1) - asm_oss << ".v" << n_words; // vector width - asm_oss << ".b" << width; // word size - asm_oss << " [ $1 + " << in_off << "]"; - asm_oss << " , {"; - for(int i = 0; i < n_words; i++){ // return values - if(i > 0) asm_oss << ","; - asm_oss << "$" << 2 + i; - } - asm_oss << "}"; - if (has_l2_evict_policy) asm_oss << ", $" << n_words + 2; - asm_oss << ";"; - // ---- - // create inline ASM signature - // --- - Type* val_arg_ty = IntegerType::get(*ctx_, width); - std::vector arg_tys = {pred->getType(), ptr->getType()}; - for(int ii = 0; ii < n_words; ii++) - arg_tys.push_back(val_arg_ty); - if (has_l2_evict_policy) - arg_tys.push_back(i64_ty); - FunctionType *asm_ty = FunctionType::get(builder_->getVoidTy(), arg_tys, false); - // --- - // create inline ASM constraints - // --- - std::string asm_cstrt = "b,l"; - for(int ii = 0; ii < n_words; ii++){ - asm_cstrt += ","; - asm_cstrt += (width == 64) ? "l" : ((width == 32) ? "r" : "c"); - } - if (has_l2_evict_policy) - asm_cstrt += ",l"; - // --- - // finally call inline ASM - // --- - InlineAsm *_asm = InlineAsm::get(asm_ty, asm_oss.str(), asm_cstrt, true); - std::vector args = {pred, ptr}; - for(unsigned int ii = 0; ii < n_words; ii++){ - size_t n_subw = width / nbits; - Value* curr = UndefValue::get(vec_ty(ty, n_subw)); - for(unsigned int jj = 0; jj < n_subw; jj++){ - Value* new_elt = vals_[val_op][idxs[i + ii*n_subw + jj]]; - if(new_elt->getType()->isIntegerTy(1)) - new_elt = builder_->CreateSExt(new_elt, builder_->getInt8Ty()); - new_elt = bit_cast(new_elt, ty); - curr = builder_->CreateInsertElement(curr, new_elt, jj); - } - args.push_back(bit_cast(curr, val_arg_ty)); - } - if (has_l2_evict_policy) - args.push_back(policies_.at(x->get_eviction_policy())); - call(_asm, args); - } -} -void generator::visit_unmasked_store_inst(ir::unmasked_store_inst* x) { - visit_store_inst(x); -} -void generator::visit_masked_store_inst(ir::masked_store_inst* x) { - visit_store_inst(x); -} - -// -- - -void generator::visit_extract_value_inst(ir::extract_value_inst *x) { - auto idxs = idxs_.at(x); - ir::value* agg = x->get_operand(0); - unsigned insert_idx = x->get_idx(); - for(size_t i = 0; i < idxs.size(); i++){ - auto idx = idxs[i]; - vals_[x][idx] = builder_->CreateExtractValue(vals_[agg][idx], {insert_idx}); - } -} - - -void generator::visit_insert_value_inst(ir::insert_value_inst *x){ - auto idxs = idxs_.at(x); - ir::value* agg = x->get_operand(0); - ir::value* val = x->get_operand(1); - unsigned insert_idx = x->get_idx(); - for(size_t i = 0; i < idxs.size(); i++){ - auto idx = idxs[i]; - vals_[x][idx] = builder_->CreateInsertValue(vals_[agg][idx], vals_[val][idx],{insert_idx}); - } -} - -// -- -/** - * \brief Code Generation for `cat` - */ -void generator::visit_cat_inst(ir::cat_inst* x) { - auto idxs = idxs_.at(x); - ir::value* lhs = x->get_operand(0); - ir::value* rhs = x->get_operand(1); - int i = 0; - for(size_t j = 0; j < idxs_.at(lhs).size(); j ++){ - 
vals_[x][idxs_[x][i++]] = vals_[lhs][idxs_[lhs][j]]; - } - for(size_t j = 0; j < idxs_.at(rhs).size(); j ++){ - vals_[x][idxs_[x][i++]] = vals_[rhs][idxs_[rhs][j]]; - } -} - - - -/** - * \brief Code Generation for `reshape` - */ -void generator::visit_reshape_inst(ir::reshape_inst* x) { - auto idxs = idxs_.at(x); - for(size_t i = 0; i < idxs_.at(x).size(); i ++){ - ir::value* op = x->get_operand(0); - vals_[x][idxs_[x][i]] = vals_[op][idxs_[op][i]]; - }; -} - -/** - * \brief Code Generation for `splat` - */ -void generator::visit_splat_inst(ir::splat_inst* x) { - for(auto idx: idxs_.at(x)) - vals_[x][idx] = vals_[x->get_operand(0)][{}]; -} - -/** - * \brief Code Generation for `broadcast` - */ -void generator::visit_broadcast_inst(ir::broadcast_inst* x) { - ir::value* op = x->get_operand(0); - const auto& shape = op->get_type()->get_block_shapes(); - for(auto out_idx: idxs_.at(x)){ - indices_t in_idx = out_idx; - for(size_t k = 0; k < in_idx.size(); k++) - in_idx[k] = shape[k] == 1 ? i32(0) : in_idx[k]; - vals_[x][out_idx] = vals_[op][in_idx]; - } -// for(size_t i = 0; i < idxs_.at(x).size(); i++) -// vals_[x][idxs_[x][i]] = vals_[op][idxs_[op][i]]; -} - -/** - * \brief Code Generation for `downcast` - */ -void generator::visit_downcast_inst(ir::downcast_inst* x) { - vals_[x][{}] = vals_[x->get_operand(0)][{i32(0)}]; -} - -/** - * \brief Code Generation for `get_program_id` - */ -void generator::visit_get_program_id_inst(ir::get_program_id_inst* pid) { - Module *module = builder_->GetInsertBlock()->getModule(); - Value *ret = tgt_->get_block_id(module, *builder_, pid->get_axis()); - vals_[pid][{}] = ret; -} - -/** - * \brief Code Generation for `get_num_programs` - */ -void generator::visit_get_num_programs_inst(ir::get_num_programs_inst* np) { - Module *module = builder_->GetInsertBlock()->getModule(); - Value *ret = tgt_->get_num_blocks(module, *builder_, np->get_axis()); - vals_[np][{}] = ret; -} - -/** - * \brief Code Generation for `exp` - */ -void generator::visit_exp_inst(ir::exp_inst* x){ - Constant *log2e = ConstantFP::get(f32_ty, 1.4426950408889634); - std::vector tys = {f32_ty}; - FunctionType *fn_ty = FunctionType::get(f32_ty, tys, false); - InlineAsm *ex2 = InlineAsm::get(fn_ty, "ex2.approx.f32 $0, $0;", "=f,0", false); - for(auto idx: idxs_.at(x)){ - Value *ex2arg = fmul(vals_[x->get_operand(0)][idx], log2e); - // Value *ex2arg = vals_[x->get_operand(0)][idx]; - vals_[x][idx] = call(ex2, std::vector{ex2arg}); - } -} - -/** - * \brief Code Generation for `cos` - */ -void generator::visit_cos_inst(ir::cos_inst* x){ - std::vector tys = {f32_ty}; - FunctionType *fn_ty = FunctionType::get(f32_ty, tys, false); - InlineAsm *cos = InlineAsm::get(fn_ty, "cos.approx.f32 $0, $0;", "=f,0", false); - for(auto idx: idxs_.at(x)){ - vals_[x][idx] = call(cos, std::vector{vals_[x->get_operand(0)][idx]}); - } -} - -/** - * \brief Code Generation for `umulhi` - */ -void generator::visit_umulhi_inst(ir::umulhi_inst* x){ - std::vector tys = {i32_ty, i32_ty}; - FunctionType *fn_ty = FunctionType::get(i32_ty, tys, false); - InlineAsm *umulhi = InlineAsm::get(fn_ty, "mul.hi.u32 $0, $1, $2;", "=r,r,r", false); - for(auto idx: idxs_.at(x)){ - Value* lhs = vals_[x->get_operand(0)][idx]; - Value* rhs = vals_[x->get_operand(1)][idx]; - vals_[x][idx] = call(umulhi, std::vector{lhs, rhs}); - } - } - -/** - * \brief Code Generation for `sin` - */ -void generator::visit_sin_inst(ir::sin_inst* x){ - std::vector tys = {f32_ty}; - FunctionType *fn_ty = FunctionType::get(f32_ty, tys, false); - InlineAsm *sin = 
InlineAsm::get(fn_ty, "sin.approx.f32 $0, $0;", "=f,0", false); - for(auto idx: idxs_.at(x)){ - vals_[x][idx] = call(sin, std::vector{vals_[x->get_operand(0)][idx]}); - } - } - -/** - * \brief Code Generation for `log` - */ -void generator::visit_log_inst(ir::log_inst* x){ - Constant *rcplog2e = ConstantFP::get(f32_ty, 0.6931471805599453); - std::vector tys = {f32_ty}; - FunctionType *fn_ty = FunctionType::get(f32_ty, tys, false); - InlineAsm *lg2 = InlineAsm::get(fn_ty, "lg2.approx.f32 $0, $1;", "=f,f", false); - for(auto idx: idxs_.at(x)){ - Value *lg2arg = call(lg2, std::vector{vals_[x->get_operand(0)][idx]}); - vals_[x][idx] = fmul(lg2arg, rcplog2e); - } -} - -/** - * \brief Code Generation for `atomic_cas` - */ -void generator::visit_atomic_cas_inst(ir::atomic_cas_inst* cas) { - BasicBlock *current = builder_->GetInsertBlock(); - Module *module = current->getModule(); - Value *tid = tgt_->get_local_id(module, *builder_, 0); - Value *pred = icmp_eq(tid, i32(0)); -// BasicBlock *tid_0_bb = BasicBlock::Create(*ctx_, "tid_0", current->getParent()); -// BasicBlock *tid_0_done_bb = BasicBlock::Create(*ctx_, "tid_0_done", current->getParent()); - add_barrier(); - tgt_->add_memfence(module, *builder_); - Value *atom_ptr; - atom_ptr = gep(shmem_, i32(alloc_->offset(layouts_->get(layouts_->tmp(cas)))), ""); - atom_ptr = bit_cast(atom_ptr, ptr_ty(cvt(cas->get_type()->get_scalar_ty()), 3)); -// cond_br(pred, tid_0_bb, tid_0_done_bb); -// builder_->SetInsertPoint(tid_0_bb); - Value *cas_ptr = vals_[cas->get_operand(0)][{}]; - Value *cas_cmp = vals_[cas->get_operand(1)][{}]; - Value *cas_val = vals_[cas->get_operand(2)][{}]; - std::string asm_str = "@$1 atom.global.cas.b32 $0, [$2], $3, $4;"; - FunctionType *fn_ty = FunctionType::get(i32_ty, {pred->getType(), cas_ptr->getType(), cas_cmp->getType(), cas_val->getType()}, false); - InlineAsm *iasm = InlineAsm::get(fn_ty, asm_str, "=r,b,l,r,r", true); - add_barrier(); - Value *old = call(iasm, {pred, cas_ptr, cas_cmp, cas_val}); - add_barrier(); - - std::string asm2_str = "@$0 st.shared.b32 [$1], $2;"; - FunctionType *fn2_ty = FunctionType::get(void_ty, {pred->getType(), atom_ptr->getType(), old->getType()}, false); - InlineAsm *iasm2 = InlineAsm::get(fn2_ty, asm2_str, "b,r,r", true); - add_barrier(); - call(iasm2, {pred, atom_ptr, old}); - tgt_->add_memfence(module, *builder_); - add_barrier(); - vals_[cas][{}] = load(atom_ptr); - add_barrier(); -} - -/** - * \brief Code Generation for `atomic_rmw` - */ -void generator::visit_atomic_rmw_inst(ir::atomic_rmw_inst *atom) { - ir::value* ptr = atom->get_operand(0); - ir::value* val = atom->get_operand(1); - ir::value* msk = atom->get_operand(2); - - // vector size - int vec = 1; - Value *mask = builder_->getInt1(true); - if(atom->get_type()->is_block_ty()){ - auto shape = atom->get_type()->get_block_shapes(); - int ld = ords_.at(ptr)[0]; - unsigned alignment = alignment_->get(ptr, ld); - vec = std::min(layouts_->get(ptr)->to_scanline()->nts(ld), alignment); - vec = std::min(vec, val->get_type()->get_tile_element_ty()->is_fp16_ty() ? 
2 : 1); - // mask out inactive threads - analysis::data_layout* layout = layouts_->get(val); - auto curr_axes = a_axes_->get(val); - auto layt_axes = layout->get_axes(); - for(unsigned k = 0; k < layt_axes.size(); k++){ - unsigned ax = layt_axes.at(k); - distributed_axis dax = axes_.at(ax); - // axis is part of the original layout: thread id should be 0 - // but not the current layout - if(std::find(curr_axes.begin(), curr_axes.end(), ax) == curr_axes.end()) - mask = and_(mask, icmp_eq(dax.thread_id, i32(0))); - } - // last axis may spillover - Value *thread_id = tgt_->get_local_id(mod_, *builder_, 0); - int per_thread = 1; - for(int ax: layt_axes) { per_thread *= axes_.at(ax).contiguous; } - int numel = 1; - for(int s: layout->get_shape()) { numel *= s; } - mask = and_(mask, icmp_ult(mul(thread_id, i32(per_thread)), i32(numel))); - } - - - for(int i = 0; i < idxs_.at(val).size(); i += vec){ - auto idx = idxs_[val][i]; - Value *rmw_val = UndefValue::get(vec_ty(vals_[val][idx]->getType(), vec)); - for(int ii = 0; ii < vec; ii++) - rmw_val = insert_elt(rmw_val, vals_[val][idxs_[val][i+ii]], ii); - Value *rmw_ptr = vals_[ptr][idx]; - Value *rmw_msk = vals_[msk][idx]; - rmw_msk = and_(rmw_msk, mask); - if(vec == 1) - rmw_val = extract_elt(rmw_val, i32(0)); - Type* ty = rmw_val->getType(); - size_t nbits = ty->getScalarSizeInBits(); - // extract pointer offset - std::string offset = ""; - if(GetElementPtrInst *gep = dyn_cast(rmw_ptr)) - if(gep->getNumIndices() == 1) - if(ConstantInt *cst = dyn_cast(gep->idx_begin())){ - offset = " + " + std::to_string(cst->getValue().getSExtValue()*nbits/8); - rmw_ptr = gep->getPointerOperand(); - } - rmw_ptr = bit_cast(rmw_ptr, ty->getPointerTo(1)); - // asm argument type - std::vector arg_ty = {rmw_msk->getType(), rmw_ptr->getType(), rmw_val->getType()}; - // asm function type - FunctionType *fn_ty = FunctionType::get(ty, arg_ty, false); - // asm string - std::string s_nbits = std::to_string(nbits); - std::string name; - std::string s_ty; - using tt = ir::atomic_rmw_op_t; - switch(atom->get_op()){ - case tt::Or: name = "or"; s_ty = "b"; break; - case tt::And: name = "and"; s_ty = "b"; break; - case tt::Xor: name = "xor", s_ty = "b"; break; - case tt::Add: name = "add" , s_ty = "s"; break; - case tt::Min: name = "min", s_ty = "s"; break; - case tt::Max: name = "max", s_ty = "s"; break; - case tt::UMin: name = "min", s_ty = "u"; break; - case tt::UMax: name = "max", s_ty = "u"; break; - case tt::FAdd: name = "add", s_ty = "f"; break; - case tt::Xchg: name = "exch", s_ty = "b"; break; - } - std::string s_vec = vec == 2 ? "x2" : ""; - std::string mod = nbits == 16 ? ".noftz" : ""; - - std::string asm_str = "@$1 atom.global.gpu." + name + mod + "." + s_ty + s_nbits + s_vec + " $0, [$2" + offset + "], $3;"; - std::string ty_id = nbits*vec == 64 ? "l" : (nbits*vec == 32 ? 
"r" : "h"); - std::string constraint = "=" + ty_id + ",b,l," + ty_id; - // create inline asm - InlineAsm *iasm = InlineAsm::get(fn_ty, asm_str, constraint, true); - // call asm - if(atom->get_type()->is_block_ty()) - vals_[atom][idx] = call(iasm, (ArrayRef{rmw_msk, rmw_ptr, rmw_val})); - else{ - Module *mod = builder_->GetInsertBlock()->getModule(); - tgt_->add_memfence(mod, *builder_); - add_barrier(); - Value *tid = tgt_->get_local_id(mod, *builder_, 0); - rmw_msk = builder_->CreateAnd(rmw_msk, icmp_eq(tid, i32(0))); - Value *old = call(iasm, (ArrayRef{rmw_msk, rmw_ptr, rmw_val})); - Value *atom_ptr; - atom_ptr = gep(shmem_, i32(alloc_->offset(layouts_->get(layouts_->tmp(atom)))), ""); - atom_ptr = bit_cast(atom_ptr, ptr_ty(old->getType(), 3)); - store(old, atom_ptr); - add_barrier(); - vals_[atom][idx] = load(atom_ptr); - add_barrier(); - } - } -} - -/** - * \brief Code Generation for `mma.884` (V100) - */ -//TODO: clean-up -void generator::visit_mma884(ir::dot_inst* C, ir::value *A, ir::value *B, ir::value *D, unsigned NK) { - // shapes - auto shape_c = C->get_type()->get_block_shapes(); - auto shape_a = A->get_type()->get_block_shapes(); - auto shape_b = B->get_type()->get_block_shapes(); - // order - auto ord_a = layouts_->get(A)->get_order(); - auto ord_b = layouts_->get(B)->get_order(); - bool is_a_trans = C->is_trans_a(); - // is_a_trans = false; - if(C->is_trans_a()){ - std::swap(ord_a[0], ord_a[1]); - std::swap(shape_a[0], shape_a[1]); - std::swap(offset_a_m_, offset_a_k_); - } - // std::cout << "visiting" << std::endl; - // if(C->is_trans_b()){ - // std::swap(ord_b[0], ord_b[1]); - // std::swap(shape_b[0], shape_b[1]); - // } - // layouts - analysis::mma_layout* layout_c = layouts_->get(C)->to_mma(); - analysis::shared_layout* layout_a = layouts_->get(A)->to_shared(); - analysis::shared_layout* layout_b = layouts_->get(B)->to_shared(); - // vectorization - int vec_a = swizzle_->get_vec(layout_a); - int vec_b = swizzle_->get_vec(layout_b); - // strides - bool is_a_row = ord_a[0] != 0; - bool is_b_row = ord_b[0] != 0; - int stride_am = is_a_row ? shape_a[1] : 1; - int stride_ak = is_a_row ? 1 : shape_a[0]; - int stride_a0 = is_a_row ? stride_ak : stride_am; - int stride_a1 = is_a_row ? stride_am : stride_ak; - int stride_bn = is_b_row ? 1 : shape_b[0]; - int stride_bk = is_b_row ? shape_b[1] : 1; - int stride_b0 = is_b_row ? stride_bn : stride_bk; - int stride_b1 = is_b_row ? stride_bk : stride_bn; - int stride_rep_m = layout_c->wpt(0) * layout_c->fpw(0) * 8; - int stride_rep_n = layout_c->wpt(1) * layout_c->fpw(1) * 8; - int stride_rep_k = 1; - // swizzling - int per_phase_a = swizzle_->get_per_phase(layout_a); - int max_phase_a = swizzle_->get_max_phase(layout_a); - int step_a0 = is_a_row ? stride_rep_k : stride_rep_m; - int num_ptr_a = std::max(2 * per_phase_a * max_phase_a / step_a0, 1); - int per_phase_b = swizzle_->get_per_phase(layout_b); - int max_phase_b = swizzle_->get_max_phase(layout_b); - int step_b0 = is_b_row ? 
stride_rep_n : stride_rep_k; - int num_ptr_b = std::max(2 * per_phase_b * max_phase_b / step_b0, 1); - - - // max_phase_a = 4; - // vec_a = 8; - // std::cout << per_phase_a << " " << max_phase_a << " " << step_a0 << " " << num_ptr_a << " " << stride_am << " " << stride_ak << " " << stride_a0 << " " << stride_a1 << std::endl; - // std::cout << vec_a << " " << vec_b << std::endl; - - /* --------------------------------- */ - /* --- pre-compute pointer lanes --- */ - /* --------------------------------- */ - BasicBlock* curr_bb = builder_->GetInsertBlock(); - BasicBlock* entry = &curr_bb->getParent()->getEntryBlock(); - if(entry != curr_bb) - builder_->SetInsertPoint(entry->getTerminator()); - Value* off_a0 = is_a_row ? offset_a_k_[layout_c] : offset_a_m_[layout_c]; - Value* off_a1 = is_a_row ? offset_a_m_[layout_c] : offset_a_k_[layout_c]; - Value* phase_a = urem(udiv(off_a1, i32(per_phase_a)), i32(max_phase_a)); - std::vector off_a(num_ptr_a); - for(int i = 0; i < num_ptr_a; i++){ - Value* off_a0i = add(off_a0, i32(i*(is_a_row?4:stride_rep_m))); - off_a0i = exact_udiv(off_a0i, i32(vec_a)); - off_a0i = xor_(off_a0i, phase_a); - off_a0i = mul(off_a0i, i32(vec_a)); - off_a[i] = add(mul(off_a0i, i32(stride_a0)), mul(off_a1, i32(stride_a1))); - } - Value* off_b0 = is_b_row ? offset_b_n_[layout_c] : offset_b_k_[layout_c]; - Value* off_b1 = is_b_row ? offset_b_k_[layout_c] : offset_b_n_[layout_c]; - Value* phase_b = urem(udiv(off_b1, i32(per_phase_b)), i32(max_phase_b)); - std::vector off_b(num_ptr_b); - for(int i = 0; i < num_ptr_b; i++){ - Value* off_b0i = add(off_b0, i32(i*(is_b_row?stride_rep_n:4))); - off_b0i = udiv(off_b0i, i32(vec_b)); - off_b0i = xor_(off_b0i, phase_b); - off_b0i = mul(off_b0i, i32(vec_b)); - off_b[i] = add(mul(off_b0i, i32(stride_b0)), mul(off_b1, i32(stride_b1))); - } - builder_->SetInsertPoint(curr_bb); - - /* --------------------------------- */ - /* --- MMA intrinsic --- */ - /* --------------------------------- */ - Type *f16x2_ty = vec_ty(f16_ty, 2); - Type *ret_ty = StructType::get(*ctx_, {f32_ty, f32_ty, f32_ty, f32_ty, f32_ty, f32_ty, f32_ty, f32_ty}); - std::vector arg_ty = {f16x2_ty, f16x2_ty, f16x2_ty, f16x2_ty, - f32_ty, f32_ty, f32_ty, f32_ty, f32_ty, f32_ty, f32_ty, f32_ty}; - InlineAsm *mma = InlineAsm::get(FunctionType::get(ret_ty, arg_ty, false), - " mma.sync.aligned.m8n8k4." - + std::string(is_a_row ? "row" : "col") - + "." - + std::string(is_b_row ? 
"row" : "col") - + ".f32.f16.f16.f32 " - "{$0, $1, $2, $3, $4, $5, $6, $7}, " - "{$8, $9}, " - "{$10, $11}, " - "{$0, $1, $2, $3, $4, $5, $6, $7};", "=f,=f,=f,=f,=f,=f,=f,=f,r,r,r,r,0,1,2,3,4,5,6,7", false); - - - std::vector ptr_a(num_ptr_a); - std::vector ptr_b(num_ptr_b); - std::map, std::pair> has, hbs; - for(int i = 0; i < num_ptr_a; i++) - ptr_a[i] = gep(shmems_[A], off_a[i]); - for(int i = 0; i < num_ptr_b; i++) - ptr_b[i] = gep(shmems_[B], off_b[i]); - - - // initialize accumulators - std::vector acc; - for(indices_t idx: idxs_.at(C)) - acc.push_back(vals_[D][idx]); - - unsigned num_m = layout_c->rep(0) * shape_c[0] / layout_c->shape_per_cta(0); - unsigned num_n = layout_c->rep(1) * shape_c[1] / layout_c->shape_per_cta(1); - - // create mma & unpack result - auto call_mma = [&](unsigned m, unsigned n, unsigned K) { - auto ha = has[{m, K}]; - auto hb = hbs[{n, K}]; - // arguments - std::vector idx = { - (m*2 + 0) + (n*4 + 0)*num_m, (m*2 + 0) + (n*4 + 1)*num_m, - (m*2 + 1) + (n*4 + 0)*num_m, (m*2 + 1) + (n*4 + 1)*num_m, - (m*2 + 0) + (n*4 + 2)*num_m, (m*2 + 0) + (n*4 + 3)*num_m, - (m*2 + 1) + (n*4 + 2)*num_m, (m*2 + 1) + (n*4 + 3)*num_m - }; - std::vector args = {ha.first, ha.second, hb.first, hb.second}; - for(unsigned i = 0; i < 8; i++) - args.push_back(acc[idx[i]]); - // execute mma - Value *nc = call(mma, args); - // unpack - for(unsigned i = 0; i < 8; i++) - acc[idx[i]] = extract_val(nc, {i}); - }; - - ir::phi_node* phiA = dynamic_cast(A); - ir::phi_node* phiB = dynamic_cast(B); - - // Cache lds value. If values are prefetched, create phi node - // @param inc: incoming block (0 = header, 1 = loop) - auto register_lds = - [&](decltype(has)& vals, int m, int K, int inc, Value* val0, Value *val1, bool is_prefetch) { - if (K == 0 && is_prefetch) { - ir::basic_block* inc_block = phiA->get_incoming_block(inc); - lazy_phi_incs_.push_back(std::make_tuple((PHINode*)vals[{m, K}].first, val0, inc_block)); - lazy_phi_incs_.push_back(std::make_tuple((PHINode*)vals[{m, K}].second, val1, inc_block)); - } else - vals[{m, K}] = {val0, val1}; - }; - - auto load_a = [&](int m, int K, int inc, bool is_prefetch) { - int offidx = (is_a_row ? K/4 : m) % num_ptr_a; - Value* ptra; - if(K==0 && is_prefetch){ - if(inc == 0) - ptra = gep(shared_pre_ptr_[layout_a], off_a[offidx]); - else - ptra = gep(shared_next_ptr_[layout_a], off_a[offidx]); - } - else - ptra = ptr_a[offidx]; - int step_am = is_a_row ? m : m / (num_ptr_a)*(num_ptr_a); - int step_ak = is_a_row ? K / (num_ptr_a*vec_a)*(num_ptr_a*vec_a) : K; - Value* pa = gep(ptra, i32(step_am*stride_rep_m*stride_am + step_ak*stride_ak)); - Value* ha = load(bit_cast(pa, ptr_ty(vec_ty(i32_ty, vec_a/2), 3))); - // record lds that needs to be moved - if (K == 0 && inc == 1 && is_prefetch) - prefetch_latch_to_bb_[phiA->get_incoming_value(1)].push_back(ha); - Value *ha00 = bit_cast(extract_elt(ha, i32(0)), f16x2_ty); - Value *ha01 = bit_cast(extract_elt(ha, i32(1)), f16x2_ty); - register_lds(has, m, K, inc, ha00, ha01, is_prefetch); - if(vec_a > 4){ - Value *ha10 = bit_cast(extract_elt(ha, i32(2)), f16x2_ty); - Value *ha11 = bit_cast(extract_elt(ha, i32(3)), f16x2_ty); - if(is_a_row) - register_lds(has, m, K+4, inc, ha10, ha11, is_prefetch); - else - register_lds(has, m+1, K, inc, ha10, ha11, is_prefetch); - } - }; - - auto load_b = [&](int n, int K, int inc, bool is_prefetch) { - int offidx = (is_b_row? 
n : K/4) % num_ptr_b; - Value* ptrb; - if(K==0 && is_prefetch){ - if(inc == 0) - ptrb = gep(shared_pre_ptr_[layout_b], off_b[offidx]); - else - ptrb = gep(shared_next_ptr_[layout_b], off_b[offidx]); - } else - ptrb = ptr_b[offidx]; - - int stepbn = is_b_row ? n / (num_ptr_b)*(num_ptr_b) : n; - int stepbk = is_b_row ? K : K / (num_ptr_b*vec_b)*(num_ptr_b*vec_b); - Value* pb = gep(ptrb, i32(stepbn*stride_rep_n*stride_bn + stepbk*stride_bk)); - Value* hb = load(bit_cast(pb, ptr_ty(vec_ty(i32_ty, vec_b/2), 3))); - // record lds that needs to be moved - if (K == 0 && inc == 1 && is_prefetch) - prefetch_latch_to_bb_[phiB->get_incoming_value(1)].push_back(hb); - Value *hb00 = bit_cast(extract_elt(hb, i32(0)), f16x2_ty); - Value *hb01 = bit_cast(extract_elt(hb, i32(1)), f16x2_ty); - register_lds(hbs, n, K, inc, hb00, hb01, is_prefetch); - if(vec_b > 4){ - Value *hb10 = bit_cast(extract_elt(hb, i32(2)), f16x2_ty); - Value *hb11 = bit_cast(extract_elt(hb, i32(3)), f16x2_ty); - if(is_b_row) - register_lds(hbs, n+1, K, inc, hb10, hb11, is_prefetch); - else - register_lds(hbs, n, K+4, inc, hb10, hb11, is_prefetch); - } - - }; - - // update accumulators - if (C->is_prefetched()) { - // create phis - builder_->SetInsertPoint(curr_bb->getFirstNonPHI()); - for (unsigned m = 0; m < num_m/2; m += is_a_row?1:2) { - has[{m, 0}].first = phi(f16x2_ty, 2); - has[{m, 0}].second = phi(f16x2_ty, 2); - if (!is_a_row && vec_a>4) { - has[{m+1, 0}].first = phi(f16x2_ty, 2); - has[{m+1, 0}].second = phi(f16x2_ty, 2); - } - } - for (unsigned n = 0; n < num_n/2; n += is_b_row?2:1) { - hbs[{n, 0}].first = phi(f16x2_ty, 2); - hbs[{n, 0}].second = phi(f16x2_ty, 2); - if (is_b_row && vec_b>4) { - hbs[{n+1, 0}].first = phi(f16x2_ty, 2); - hbs[{n+1, 0}].second = phi(f16x2_ty, 2); - } - } - - // insert prefetched lds at the end of loop header - builder_->SetInsertPoint(bbs_[phiA->get_incoming_block(0)]->getTerminator()); - for (unsigned m = 0; m < num_m/2; m += is_a_row?1:2) - load_a(m, 0, 0, true); - for (unsigned n = 0; n < num_n/2; n += is_b_row?2:1) - load_b(n, 0, 0, true); - - // update accumulators - builder_->SetInsertPoint(curr_bb); - for (unsigned K = 0; K < NK; K += 4) { - int NEXTK = (K + 4) % NK; - // prefetch A - for (unsigned m = 0; m < num_m/2; m+=is_a_row?1:2) - load_a(m, NEXTK, 1, true); - // prefetch B - for (unsigned n = 0; n < num_n/2; n+=is_b_row?2:1) - load_b(n, NEXTK, 1, true); - // tensor core ops - for(unsigned m = 0; m < num_m/2; m++) - for(unsigned n = 0; n < num_n/2; n++){ - call_mma(m, n, K); - } - } - } else { // not prefetched - for(unsigned K = 0; K < NK; K += 4) - for(unsigned m = 0; m < num_m/2; m++) - for(unsigned n = 0; n < num_n/2; n++) { - if(has.find({m, K}) == has.end()) - load_a(m, K, /*inc*/0, /*is_prefetch*/false); - if(hbs.find({n, K}) == hbs.end()) - load_b(n, K, /*inc*/0, /*is_prefetch*/false); - call_mma(m, n, K); - } - } - - // write back accumulators - for(size_t i = 0; i < idxs_.at(C).size(); i++) - vals_[C][idxs_[C][i]] = acc[i]; -} - -namespace { -class mma16816_smem_loader { -public: - mma16816_smem_loader(int wpt, std::vector order, int k_order, - std::vector tile_shape, - std::vector instr_shape, std::vector mat_shape, - int per_phase, int max_phase, int dtsize, Builder *builder, - adder add, multiplier mul, geper gep) - : wpt_(wpt), order_(order), k_order_(k_order), tile_shape_(tile_shape), - instr_shape_(instr_shape), mat_shape_(mat_shape), - per_phase_(per_phase), max_phase_(max_phase), dtsize_(dtsize), builder_(builder), - add(add), mul(mul), gep(gep) { - // compute 
compile-time constant variables & types - c_mat_shape_ = mat_shape[order[0]]; - s_mat_shape_ = mat_shape[order[1]]; - - c_stride_ = tile_shape[order[1]]; - s_stride_ = tile_shape[order[0]]; - - // rule: k must be the fast-changing axis - need_trans_ = k_order_ != order_[0]; - can_use_ldmatrix_ = dtsize == 2 || (!need_trans_); - - // we need more pointers at the fast-changing axis, - if (can_use_ldmatrix_) - num_ptr_ = tile_shape[order[0]] / (order[0] == k_order? 1 : wpt) / instr_shape[order[0]]; - else // warning: this only works for tf32 & need transpose - num_ptr_ = tile_shape[order[0]] / wpt / mat_shape[order[0]]; - num_ptr_ = std::max(num_ptr_, 2); - - // special rule for i8/u8, 4 ptrs for each matrix - if (!can_use_ldmatrix_ && dtsize_ == 1) - num_ptr_ *= 4; - - // load_v4 stride (in num of mats) - int load_stride_in_mat[2]; - load_stride_in_mat[k_order] = 2; // instr_shape[k_order] / mat_shape[k_order], always 2 - load_stride_in_mat[k_order^1] = wpt * (instr_shape[k_order^1] / mat_shape[k_order^1]); - p_load_stride_in_mat_ = load_stride_in_mat[order[0]]; - // stride in mat, used by load_v4 - s_mat_stride_ = load_stride_in_mat[order[1]] / (instr_shape[order[1]]/mat_shape[order[1]]); - } - - std::vector compute_offs(Value *warp_off, Value *lane) { - // TODO: this needs to be moved to constructor (and extracted to arr_order) - mat_arr_stride_ = (k_order_ == 1) ? 1 : wpt_; - warp_off_stride_ = instr_shape_[k_order_^1] / mat_shape_[k_order_^1]; - // start matrix logic offset (rename it as base_mat_off?) - Value *mat_off[2] = {nullptr, nullptr}; - - if (can_use_ldmatrix_) { - // c: lane idx inside a group (a group is a collection of 8 contiguous threads) - // s: group idx (0,1,2,3) inside a warp - Value *c = urem(lane, i32(8)); - Value *s = udiv(lane, i32(8)); - // We can decompose s => s_0, s_1... - Value *s0 = urem(s, i32(2)); - Value *s1 = udiv(s, i32(2)); - - // We use different orders for a & b for better performance. - Value *k_mat_arr = (k_order_ == 1) ? s1 : s0; - Value *nk_mat_arr = (k_order_ == 1) ? s0 : s1; - mat_off[k_order_^1] = add(mul(warp_off, i32(warp_off_stride_)), - mul(nk_mat_arr, i32(mat_arr_stride_))); - mat_off[k_order_] = k_mat_arr; - // physical offset (before swizzling) - Value *c_mat_off = mat_off[order_[0]]; - Value *s_mat_off = mat_off[order_[1]]; - // offset inside a matrix - Value *s_off_in_mat = c; - - std::vector offs(num_ptr_); - Value *phase = urem(udiv(s_off_in_mat, i32(per_phase_)), i32(max_phase_)); - // pre-compute strided offset - Value *s_off = add(s_off_in_mat, mul(s_mat_off, i32(s_mat_shape_))); - for (int i=0; i < num_ptr_; ++i) { - Value *c_mat_off_i = add(c_mat_off, i32(i*p_load_stride_in_mat_)); - c_mat_off_i = xor_(c_mat_off_i, phase); // smem swizzle - offs[i] = add(mul(c_mat_off_i, i32(c_mat_shape_)), mul(s_off, i32(s_stride_))); - } - return offs; - } else if (dtsize_ == 4 && need_trans_) { - // load tf32 matrices with lds32 - Value *c_off_in_mat = udiv(lane, i32(4)); // 4 = mat_shape[order[1]] - Value *s_off_in_mat = urem(lane, i32(4)); // - - Value *phase = urem(udiv(s_off_in_mat, i32(per_phase_)), i32(max_phase_)); - std::vector offs(num_ptr_); - for (int mat = 0; mat < 4; ++mat) { // loads 4 mats each time - int k_mat_arr_int = (k_order_ == 1) ? mat/2 : mat%2; - int nk_mat_arr_int = (k_order_ == 1) ? 
mat%2 : mat/2; - if (k_mat_arr_int > 0) // we don't need pointers for k - continue; - Value *k_mat_arr = i32(k_mat_arr_int); - Value *nk_mat_arr = i32(nk_mat_arr_int); - // physical offset (before swizzling) - Value *c_mat_off = add(mul(warp_off, i32(warp_off_stride_)), - mul(nk_mat_arr, i32(mat_arr_stride_))); - Value *s_mat_off = k_mat_arr; // always 0? - Value *s_off = add(s_off_in_mat, mul(s_mat_off, i32(s_mat_shape_))); - // FIXME: (k_order_ == 1?) is really dirty hack - for (int i = 0; i < num_ptr_/2; ++i) { - Value *c_mat_off_i = add(c_mat_off, i32(i*p_load_stride_in_mat_*(k_order_ == 1?1:2))); - c_mat_off_i = xor_(c_mat_off_i, phase); - Value *c_off = add(c_off_in_mat, mul(c_mat_off_i, i32(c_mat_shape_))); - // TODO: move this out of the loop - c_off = urem(c_off, i32(tile_shape_[order_[0]])); - s_off = urem(s_off, i32(tile_shape_[order_[1]])); - offs[2*i + nk_mat_arr_int] = add(c_off, mul(s_off, i32(s_stride_))); - } - } - return offs; - // throw std::runtime_error("not implemented"); - } else if (dtsize_ == 1 && need_trans_) { - // load i8/u8 matrices with lds8 - Value *c_off_in_mat = udiv(lane, i32(4)); // - Value *s_off_in_mat = mul(urem(lane, i32(4)), i32(4)); // each thread load 4 cols - - // Value *phase = urem(udiv(s_off_in_mat, i32(per_phase_)), i32(max_phase_)); - std::vector offs(num_ptr_); - for (int mat = 0; mat < 4; ++mat) { // loads 4 mats each time - int k_mat_arr_int = (k_order_ == 1) ? mat/2 : mat%2; - int nk_mat_arr_int = (k_order_ == 1) ? mat%2 : mat/2; - if (k_mat_arr_int > 0) // we don't need pointers for k - continue; - Value *k_mat_arr = i32(k_mat_arr_int); - Value *nk_mat_arr = i32(nk_mat_arr_int); - // physical offset (before swizzling) - Value *c_mat_off = add(mul(warp_off, i32(warp_off_stride_)), - mul(nk_mat_arr, i32(mat_arr_stride_))); - Value *s_mat_off = k_mat_arr; // always 0? - - for (int loadx4_off = 0; loadx4_off < num_ptr_/8; ++loadx4_off) { - for (int elem_off = 0; elem_off < 4; ++elem_off) { - int ptr_off = loadx4_off*8 + nk_mat_arr_int*4 + elem_off; - - Value *c_mat_off_i = add(c_mat_off, i32(loadx4_off*p_load_stride_in_mat_*(k_order_ == 1?1:2))); - Value *s_off_in_mat_elem = add(s_off_in_mat, i32(elem_off)); - - // disable swizzling ... 
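// The ldmatrix and lds.32 paths above avoid shared-memory bank conflicts by
// XOR-ing the contiguous matrix index with a phase derived from the strided
// offset (the swizzle that this i8 path skips). A reference-only sketch of that
// index transform (hypothetical helper; per_phase/max_phase as computed by the
// swizzle analysis):
static int swizzled_c_index(int s_off_in_mat, int c_mat_off, int per_phase, int max_phase) {
  int phase = (s_off_in_mat / per_phase) % max_phase; // urem(udiv(...)) above
  return c_mat_off ^ phase;                           // xor_: rows land on different banks
}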
- // Value *phase = urem(udiv(s_off_in_mat, i32(per_phase_)), i32(max_phase_)); - // c_mat_off_i = xor_(c_mat_off_i, phase); - - Value *c_off = add(c_off_in_mat, mul(c_mat_off_i, i32(c_mat_shape_))); - Value *s_off = add(s_off_in_mat_elem, mul(s_mat_off, i32(s_mat_shape_))); - // To prevent out-of-bound access when the tile is too small - c_off = urem(c_off, i32(tile_shape_[order_[0]])); - s_off = urem(s_off, i32(tile_shape_[order_[1]])); - offs[ptr_off] = add(c_off, mul(s_off, i32(s_stride_))); - } - } - } - return offs; - } else - throw std::runtime_error("invalid smem load config"); - } - - std::tuple - load_x4(int mat0, int mat1, int inc, bool is_prefetch, ir::phi_node *pn, - Value *pre_ptr, Value *next_ptr, std::vector &off, std::vector &ptrs, - FunctionType *ldmatrix_ty, Type *smem_ptr_ty, - std::map> &prefetch_latch_to_bb_) { - assert(mat0 % 2 == 0 && mat1 % 2 == 0 && "smem matrix load must be aligned"); - int mat_idx[2] = {mat0, mat1}; - int k = mat_idx[k_order_]; - - int ptr_idx = -1; - if (can_use_ldmatrix_) - ptr_idx = mat_idx[order_[0]] / (instr_shape_[order_[0]] / mat_shape_[order_[0]]); - else if (dtsize_ == 4 && need_trans_) // tf32 & trans - ptr_idx = mat_idx[order_[0]]; - else // i8 & trans - ptr_idx = mat_idx[order_[0]] * 4; - - auto get_ptr = [&](int idx) -> Value* { - Value *ptr = nullptr; - if (k == 0 && is_prefetch) { - if (inc == 0) - ptr = bit_cast(gep(pre_ptr, off.at(idx)), smem_ptr_ty); - else - ptr = bit_cast(gep(next_ptr, off.at(idx)), smem_ptr_ty); - } else - ptr = ptrs.at(idx); - return ptr; - }; - Value *ptr = get_ptr(ptr_idx); - - Value *res_v4 = nullptr; - if (can_use_ldmatrix_) { - std::string trans = need_trans_ ? ".trans" : ""; - // the offset (in byte) on the strided axis is a constant - int s_offset = mat_idx[order_[1]] * (s_mat_stride_*s_mat_shape_) * s_stride_ * dtsize_; - InlineAsm *ld_fn = InlineAsm::get(ldmatrix_ty, - "ldmatrix.sync.aligned.m8n8.x4" + trans + ".shared.b16 " - "{$0, $1, $2, $3}, " - "[$4 + " + std::to_string(s_offset) + "];", - "=r,=r,=r,=r,r", true); - assert(ptr); - res_v4 = call(ldmatrix_ty, ld_fn, {ptr}); - if (k == 0 && inc == 1 && is_prefetch) - prefetch_latch_to_bb_[pn->get_incoming_value(1)].push_back(res_v4); - return {extract_val(res_v4, std::vector{0}), - extract_val(res_v4, std::vector{1}), - extract_val(res_v4, std::vector{2}), - extract_val(res_v4, std::vector{3})}; - } else if (dtsize_ == 4 && need_trans_) { // use lds.32 to load tf32 matrices - Value *ptr2 = get_ptr(ptr_idx+1); - assert(s_mat_stride_ == 1); - int s_offset_elem = mat_idx[order_[1]] * (s_mat_stride_*s_mat_shape_) * s_stride_; - int s_offset_arr_elem = 1 * (s_mat_stride_*s_mat_shape_) * s_stride_; - Value *elem0, *elem1, *elem2, *elem3; - if (k_order_ == 1) { - elem0 = load(gep(ptr, i32(s_offset_elem))); - elem1 = load(gep(ptr2, i32(s_offset_elem))); - elem2 = load(gep(ptr, i32(s_offset_elem + s_offset_arr_elem))); - elem3 = load(gep(ptr2, i32(s_offset_elem + s_offset_arr_elem))); - } else { // for b (k first) - elem0 = load(gep(ptr, i32(s_offset_elem))); - elem2 = load(gep(ptr2, i32(s_offset_elem))); - elem1 = load(gep(ptr, i32(s_offset_elem + s_offset_arr_elem))); - elem3 = load(gep(ptr2, i32(s_offset_elem + s_offset_arr_elem))); - } - if (k == 0 && inc == 1 && is_prefetch) { - prefetch_latch_to_bb_[pn->get_incoming_value(1)].push_back(elem0); - prefetch_latch_to_bb_[pn->get_incoming_value(1)].push_back(elem1); - prefetch_latch_to_bb_[pn->get_incoming_value(1)].push_back(elem2); - prefetch_latch_to_bb_[pn->get_incoming_value(1)].push_back(elem3); - } 
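As an aside on the ldmatrix path above: the inline assembly string corresponds to the warp-collective PTX instruction ldmatrix.sync.aligned.m8n8.x4[.trans].shared.b16, which loads four 8x8 b16 tiles and returns four 32-bit registers per thread. A hedged CUDA-level sketch of the same call (sm_75 or newer assumed; the wrapper name ldmatrix_x4 and the use of __cvta_generic_to_shared are illustrative and not part of this code):

  __device__ void ldmatrix_x4(const void *smem_row_ptr,
                              unsigned &r0, unsigned &r1, unsigned &r2, unsigned &r3) {
    // Each of the 32 threads passes the start address of one 16-byte matrix row
    // (8 b16 elements); the hardware gathers the four 8x8 tiles collectively.
    unsigned addr = static_cast<unsigned>(__cvta_generic_to_shared(smem_row_ptr));
    asm volatile(
        "ldmatrix.sync.aligned.m8n8.x4.shared.b16 {%0, %1, %2, %3}, [%4];"
        : "=r"(r0), "=r"(r1), "=r"(r2), "=r"(r3)
        : "r"(addr));
  }

The .trans suffix selected when need_trans_ is set loads each 8x8 tile transposed, which is what lets a k-minor layout be fed to mma without a separate transpose step.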
- return {elem0, elem1, elem2, elem3}; - } else if (dtsize_ == 1 && need_trans_) { // use lds.8 to load i8/u8 matrices - Value *ptr00 = get_ptr(ptr_idx); - Value *ptr01 = get_ptr(ptr_idx+1); - Value *ptr02 = get_ptr(ptr_idx+2); - Value *ptr03 = get_ptr(ptr_idx+3); - - Value *ptr10 = get_ptr(ptr_idx+4); - Value *ptr11 = get_ptr(ptr_idx+5); - Value *ptr12 = get_ptr(ptr_idx+6); - Value *ptr13 = get_ptr(ptr_idx+7); - - assert(s_mat_stride_ == 1); - int s_offset_elem = mat_idx[order_[1]] * (s_mat_stride_*s_mat_shape_) * s_stride_; - int s_offset_arr_elem = 1 * (s_mat_stride_*s_mat_shape_) * s_stride_; - - Value *i8v4_elems[4]; - Value *i32_elems[4]; - for (int i=0; i<4; ++i) - i8v4_elems[i] = UndefValue::get(vec_ty(i8_ty, 4)); - - Value *elem00, *elem01, *elem02, *elem03; - Value *elem10, *elem11, *elem12, *elem13; - Value *elem20, *elem21, *elem22, *elem23; - Value *elem30, *elem31, *elem32, *elem33; - Value *i8_elems[4*4]; - if (k_order_ == 1) { // - i8_elems[0*4 + 0] = load(gep(ptr00, i32(s_offset_elem))); - i8_elems[0*4 + 1] = load(gep(ptr01, i32(s_offset_elem))); - i8_elems[0*4 + 2] = load(gep(ptr02, i32(s_offset_elem))); - i8_elems[0*4 + 3] = load(gep(ptr03, i32(s_offset_elem))); - - assert(i8_elems[0*4 + 0]->getType()->isIntegerTy(8)); - - i8_elems[1*4 + 0] = load(gep(ptr10, i32(s_offset_elem))); - i8_elems[1*4 + 1] = load(gep(ptr11, i32(s_offset_elem))); - i8_elems[1*4 + 2] = load(gep(ptr12, i32(s_offset_elem))); - i8_elems[1*4 + 3] = load(gep(ptr13, i32(s_offset_elem))); - - i8_elems[2*4 + 0] = load(gep(ptr00, i32(s_offset_elem + s_offset_arr_elem))); - i8_elems[2*4 + 1] = load(gep(ptr01, i32(s_offset_elem + s_offset_arr_elem))); - i8_elems[2*4 + 2] = load(gep(ptr02, i32(s_offset_elem + s_offset_arr_elem))); - i8_elems[2*4 + 3] = load(gep(ptr03, i32(s_offset_elem + s_offset_arr_elem))); - - i8_elems[3*4 + 0] = load(gep(ptr10, i32(s_offset_elem + s_offset_arr_elem))); - i8_elems[3*4 + 1] = load(gep(ptr11, i32(s_offset_elem + s_offset_arr_elem))); - i8_elems[3*4 + 2] = load(gep(ptr12, i32(s_offset_elem + s_offset_arr_elem))); - i8_elems[3*4 + 3] = load(gep(ptr13, i32(s_offset_elem + s_offset_arr_elem))); - - for (int m=0; m<4; ++m) { - for (int e=0; e<4; ++e) - i8v4_elems[m] = insert_elt(i8v4_elems[m], i8_elems[m*4 + e], e); - i32_elems[m] = bit_cast(i8v4_elems[m], i32_ty); - } - } else { // for b (k first) - i8_elems[0*4 + 0] = load(gep(ptr00, i32(s_offset_elem))); - i8_elems[0*4 + 1] = load(gep(ptr01, i32(s_offset_elem))); - i8_elems[0*4 + 2] = load(gep(ptr02, i32(s_offset_elem))); - i8_elems[0*4 + 3] = load(gep(ptr03, i32(s_offset_elem))); - - assert(i8_elems[0*4 + 0]->getType()->isIntegerTy(8)); - - i8_elems[2*4 + 0] = load(gep(ptr10, i32(s_offset_elem))); - i8_elems[2*4 + 1] = load(gep(ptr11, i32(s_offset_elem))); - i8_elems[2*4 + 2] = load(gep(ptr12, i32(s_offset_elem))); - i8_elems[2*4 + 3] = load(gep(ptr13, i32(s_offset_elem))); - - i8_elems[1*4 + 0] = load(gep(ptr00, i32(s_offset_elem + s_offset_arr_elem))); - i8_elems[1*4 + 1] = load(gep(ptr01, i32(s_offset_elem + s_offset_arr_elem))); - i8_elems[1*4 + 2] = load(gep(ptr02, i32(s_offset_elem + s_offset_arr_elem))); - i8_elems[1*4 + 3] = load(gep(ptr03, i32(s_offset_elem + s_offset_arr_elem))); - - i8_elems[3*4 + 0] = load(gep(ptr10, i32(s_offset_elem + s_offset_arr_elem))); - i8_elems[3*4 + 1] = load(gep(ptr11, i32(s_offset_elem + s_offset_arr_elem))); - i8_elems[3*4 + 2] = load(gep(ptr12, i32(s_offset_elem + s_offset_arr_elem))); - i8_elems[3*4 + 3] = load(gep(ptr13, i32(s_offset_elem + s_offset_arr_elem))); - - for (int m=0; 
m<4; ++m) { - for (int e=0; e<4; ++e) - i8v4_elems[m] = insert_elt(i8v4_elems[m], i8_elems[m*4 + e], e); - i32_elems[m] = bit_cast(i8v4_elems[m], i32_ty); - } - } - if (k == 0 && inc == 1 && is_prefetch) { - for (int m = 0; m < 4; ++m) - for (int e = 0; e < 4; ++e) - prefetch_latch_to_bb_[pn->get_incoming_value(1)].push_back(i8_elems[m*4 + e]); - } - return {i32_elems[0], i32_elems[1], i32_elems[2], i32_elems[3]}; - } else - throw std::runtime_error("invalid smem load"); - } - - int get_num_ptr() const { return num_ptr_; } - -private: - int wpt_; - std::vector order_; - int k_order_; - std::vector tile_shape_; - std::vector instr_shape_; - std::vector mat_shape_; - int per_phase_, max_phase_; - int dtsize_; - - // generated - int c_mat_shape_, s_mat_shape_; - int c_stride_, s_stride_; - // p_: on the pointer axis - int p_load_stride_in_mat_; - int s_mat_stride_; - // stride when moving to next not-k mat - int warp_off_stride_; - int mat_arr_stride_; // matrix arrangement (inside a load) stride - bool need_trans_, can_use_ldmatrix_; - int num_ptr_; - - Builder *builder_; - adder add; - multiplier mul; - geper gep; -}; -} - -/** - * \brief Code Generation for `mma.16816` (A100) - */ -//TODO: clean-up -void generator::visit_mma16816(ir::dot_inst* C, ir::value *A, ir::value *B, ir::value *D, unsigned NK) { - const std::vector& shapes = C->get_type()->get_block_shapes(); - std::map, std::vector> fcs; - for(indices_t idx: idxs_.at(C)){ - std::vector key(idx.size() - 2); - std::copy(idx.begin() + 2, idx.end(), key.begin()); - fcs[key].push_back(vals_[D][idx]); - }; - auto shape_a = A->get_type()->get_block_shapes(); - auto shape_b = B->get_type()->get_block_shapes(); - auto ord_a = layouts_->get(A)->get_order(); - if(C->is_trans_a()){ - std::swap(ord_a[0], ord_a[1]); - std::swap(shape_a[0], shape_a[1]); - } - auto ord_b = layouts_->get(B)->get_order(); - if(C->is_trans_b()){ - std::swap(ord_b[0], ord_b[1]); - std::swap(shape_b[0], shape_b[1]); - } - NK = shape_a[1]; - analysis::mma_layout* layout = layouts_->get(C)->to_mma(); - - std::vector mma_instr_shape = layout->get_mma_instr_shape(); - const int mma_instr_m = mma_instr_shape[0]; - const int mma_instr_n = mma_instr_shape[1]; - const int mma_instr_k = mma_instr_shape[2]; - - std::vector mat_shape = layout->get_mma_mat_shape(); - const int mat_shape_m = mat_shape[0]; - const int mat_shape_n = mat_shape[1]; - const int mat_shape_k = mat_shape[2]; - - - const int num_rep_m = shapes[0] / layout->shape_per_cta(0); - const int num_rep_n = shapes[1] / layout->shape_per_cta(1); - const int num_rep_k = std::max(NK/mma_instr_k, 1); - - // floating point types - Type *fp32_ty = f32_ty; - Type *fp16x2_ty = vec_ty(f16_ty, 2); - Type *bf16x2_ty = vec_ty(bf16_ty, 2); - Type *fp16x2_pack4_ty = StructType::get(*ctx_, std::vector{fp16x2_ty, fp16x2_ty, fp16x2_ty, fp16x2_ty}); - Type *bf16x2_pack4_ty = StructType::get(*ctx_, std::vector{bf16x2_ty, bf16x2_ty, bf16x2_ty, bf16x2_ty}); - Type *fp32_pack4_ty = StructType::get(*ctx_, std::vector{fp32_ty, fp32_ty, fp32_ty, fp32_ty}); - // integer types - Type *i8x4_ty = vec_ty(i8_ty, 4); - Type *i8x4_pack4_ty = StructType::get(*ctx_, std::vector{i8x4_ty, i8x4_ty, i8x4_ty, i8x4_ty}); - Type *i32_pack4_ty = StructType::get(*ctx_, std::vector{i32_ty, i32_ty, i32_ty, i32_ty}); - - - FunctionType *ldmatrix_ty = nullptr; - FunctionType *mma_ty = nullptr; - Type *phi_ty = nullptr; - Type *smem_ptr_ty = nullptr; - - ir::type *A_ir_ty = A->get_type()->get_scalar_ty(); - ir::type *B_ir_ty = B->get_type()->get_scalar_ty(); - if 
(A_ir_ty->is_fp16_ty() && B_ir_ty->is_fp16_ty()) { - mma_ty = FunctionType::get(fp32_pack4_ty, std::vector{fp16x2_ty, fp16x2_ty, fp16x2_ty, fp16x2_ty, fp16x2_ty, fp16x2_ty, fp32_ty, fp32_ty, fp32_ty, fp32_ty}, false); - smem_ptr_ty = ptr_ty(f16_ty, 3); - ldmatrix_ty = FunctionType::get(fp16x2_pack4_ty, std::vector{smem_ptr_ty}, false); - phi_ty = fp16x2_ty; - } else if (A_ir_ty->is_bf16_ty() && B_ir_ty->is_bf16_ty()) { - mma_ty = FunctionType::get(fp32_pack4_ty, std::vector{bf16x2_ty, bf16x2_ty, bf16x2_ty, bf16x2_ty, bf16x2_ty, bf16x2_ty, fp32_ty, fp32_ty, fp32_ty, fp32_ty}, false); - smem_ptr_ty = ptr_ty(bf16_ty, 3); - ldmatrix_ty = FunctionType::get(bf16x2_pack4_ty, std::vector{smem_ptr_ty}, false); - phi_ty = bf16x2_ty; - } else if (A_ir_ty->is_fp32_ty() && B_ir_ty->is_fp32_ty()) { - mma_ty = FunctionType::get(fp32_pack4_ty, std::vector{fp32_ty, fp32_ty, fp32_ty, fp32_ty, fp32_ty, fp32_ty, fp32_ty, fp32_ty, fp32_ty, fp32_ty}, false); - smem_ptr_ty = ptr_ty(fp32_ty, 3); - ldmatrix_ty = FunctionType::get(fp32_pack4_ty, std::vector{smem_ptr_ty}, false); - phi_ty = fp32_ty; - } else if (A_ir_ty->is_integer_ty(8) && B_ir_ty->is_integer_ty(8)) { - // FIXME: We should use i8 here (but nvptx will generate extra casts when using i8) - mma_ty = FunctionType::get(i32_pack4_ty, std::vector{i32_ty, i32_ty, i32_ty, i32_ty, i32_ty, i32_ty, i32_ty, i32_ty, i32_ty, i32_ty}, false); - smem_ptr_ty = ptr_ty(i8_ty, 3); - ldmatrix_ty = FunctionType::get(i32_pack4_ty, std::vector{smem_ptr_ty}, false); - phi_ty = i32_ty; - // mma_ty = FunctionType::get(i32_pack4_ty, std::vector{i8x4_ty, i8x4_ty, i8x4_ty, i8x4_ty, i8x4_ty, i8x4_ty, i32_ty, i32_ty, i32_ty, i32_ty}, false); - // smem_ptr_ty = ptr_ty(i8_ty, 3); - // ldmatrix_ty = FunctionType::get(i8x4_pack4_ty, std::vector{smem_ptr_ty}, false); - // phi_ty = i8x4_ty; - } else - throw std::runtime_error("mma16816 data type not supported"); - - // left-hand-side values - std::map, Value*> ha; - std::map, Value*> hb; - - BasicBlock* CurrBB = builder_->GetInsertBlock(); - BasicBlock* FirstBB = &CurrBB->getParent()->getEntryBlock(); - - // if true, this will move pointer declarations to the entry basic block - // not prefetched cases tend to be more limited in resource usage - // so we don't pre-compute ptrs to save registers - bool licm_ptrs = C->is_prefetched() && (FirstBB != CurrBB); - if(licm_ptrs) - builder_->SetInsertPoint(FirstBB->getTerminator()); - - Value* thread = tgt_->get_local_id(mod_, *builder_, 0); - Value *lane = urem(thread, i32(32)); - Value *warp = udiv(thread, i32(32)); - Value *warp_mn = udiv(warp, i32(layout->wpt(0))); - Value *warp_m = urem(warp, i32(layout->wpt(0))); - Value *warp_n = urem(warp_mn, i32(layout->wpt(1))); - std::vector& fc = fcs.begin()->second; - - size_t dtsize_a = A->get_type()->get_scalar_ty()->get_primitive_size_in_bits() / 8; - size_t dtsize_b = B->get_type()->get_scalar_ty()->get_primitive_size_in_bits() / 8; - - ir::phi_node* phiA = dynamic_cast(A); - ir::phi_node* phiB = dynamic_cast(B); - auto register_lds2 = - [&](std::map, Value*>& vals, int mn, int k, int inc, Value* val, bool is_prefetch) { - if (k < 2 && is_prefetch) { - ir::basic_block* inc_block = phiA->get_incoming_block(inc); - lazy_phi_incs_.push_back(std::make_tuple((PHINode*)vals[{mn, k}], val, inc_block)); - } else - vals[{mn, k}] = val; - }; - - // | -> k (row-major), since we have ldmatrix.trans, we only need to change stride - // v (s0_0(0), s1_0(2), | *num_rep_k - // m s0_1(1), s1_1(3)) | (stride in num of matrices(mat_stride_ak): 2) - // ----------- - 
// *num_rep_m (stride in num of matrices(mat_stride_am): 2*layout->wpt(0)) - std::function load_a; - analysis::shared_layout* layout_a = layouts_->get(C->get_operand(0))->to_shared(); - bool is_a_shared = layout_a != nullptr; - if(is_a_shared) { - const int per_phase_a = swizzle_->get_per_phase(layout_a); - const int max_phase_a = swizzle_->get_max_phase(layout_a); - mma16816_smem_loader a_loader(layout->wpt(0), ord_a, /*k_order*/1, shape_a, - {mma_instr_m, mma_instr_k}, {mat_shape_m, mat_shape_k}, - per_phase_a, max_phase_a, dtsize_a, builder_, add, mul, gep); - std::vector off_a = a_loader.compute_offs(warp_m, lane); - int num_ptr_a = a_loader.get_num_ptr(); - // pointers - std::vector ptrs_a(num_ptr_a); - if(licm_ptrs) - builder_->SetInsertPoint(CurrBB); - for(int i = 0; i < num_ptr_a; i++) - ptrs_a[i] = bit_cast(gep(shmems_[A], {off_a[i]}), smem_ptr_ty); - if(licm_ptrs) - builder_->SetInsertPoint(FirstBB->getTerminator()); - // loading function - load_a = [&,a_loader,ptrs_a,off_a](int m, int k, int inc, bool is_prefetch) mutable { - auto [ha0, ha1, ha2, ha3] = a_loader.load_x4(m, k, inc, is_prefetch, phiA, shared_pre_ptr_[layout_a], - shared_next_ptr_[layout_a], off_a, ptrs_a, - ldmatrix_ty, smem_ptr_ty, prefetch_latch_to_bb_); - register_lds2(ha, m, k, inc, ha0, is_prefetch); - register_lds2(ha, m+1, k, inc, ha1, is_prefetch); - register_lds2(ha, m, k+1, inc, ha2, is_prefetch); - register_lds2(ha, m+1, k+1, inc, ha3, is_prefetch); - }; - } - else { - load_a = [&](int m, int k, int inc, bool is_prefetch) { - distributed_axis ax_n = axes_.at(a_axes_->get(A, 1)); - int ldm = ax_n.values.size(); - if(ldm != num_rep_k*4) - throw std::runtime_error("Internal compiler error when trying to fuse matmuls!"); - // std::cout << m << " " << k << std::endl; - // std::cout << idxs_[A].size() << std::endl; - // std::cout << (m+1)*ldm + k*2 + 3 << std::endl; - // int ldm = num_rep_k*4; - Value* ha0 = UndefValue::get(phi_ty); // e.g., fp16x2 - Value* ha1 = UndefValue::get(phi_ty); - Value* ha2 = UndefValue::get(phi_ty); - Value* ha3 = UndefValue::get(phi_ty); - ha0 = builder_->CreateInsertElement(ha0, vals_[A][idxs_[A][(m+0)*ldm + k*2 + 0]], i32(0)); - ha0 = builder_->CreateInsertElement(ha0, vals_[A][idxs_[A][(m+0)*ldm + k*2 + 1]], i32(1)); - ha1 = builder_->CreateInsertElement(ha1, vals_[A][idxs_[A][(m+1)*ldm + k*2 + 0]], i32(0)); - ha1 = builder_->CreateInsertElement(ha1, vals_[A][idxs_[A][(m+1)*ldm + k*2 + 1]], i32(1)); - ha2 = builder_->CreateInsertElement(ha2, vals_[A][idxs_[A][(m+0)*ldm + k*2 + 2]], i32(0)); - ha2 = builder_->CreateInsertElement(ha2, vals_[A][idxs_[A][(m+0)*ldm + k*2 + 3]], i32(1)); - ha3 = builder_->CreateInsertElement(ha3, vals_[A][idxs_[A][(m+1)*ldm + k*2 + 2]], i32(0)); - ha3 = builder_->CreateInsertElement(ha3, vals_[A][idxs_[A][(m+1)*ldm + k*2 + 3]], i32(1)); - ha[{m, k}] = ha0; - ha[{m+1, k}] = ha1; - ha[{m, k+1}] = ha2; - ha[{m+1, k+1}] = ha3; - }; - } - - - // | -> n (col-major) - // v (s0_0(0), | (stride: wpt(1)) | s1_0(2) | *num_rep_n - // k s0_1(1), | | s1_1(3)) | (stride in num of matrices(mat_stride_bn): wpt(1)) - // ----------- - // *num_rep_k (stride in num of matrices(mat_stride_bk): 2) - analysis::shared_layout* layout_b = layouts_->get(C->get_operand(1))->to_shared(); - const int per_phase_b = swizzle_->get_per_phase(layout_b); - const int max_phase_b = swizzle_->get_max_phase(layout_b); - std::vector mma_instr_b{mma_instr_k, mma_instr_n}; - std::vector mat_shape_b{mat_shape_k, mat_shape_n}; - int k_order_b = 0; - // if(C->is_trans_b()){ - // 
std::swap(mma_instr_b[0], mma_instr_b[1]); - // std::swap(mat_shape_b[0], mat_shape_b[1]); - // k_order_b = k_order_b ^ 1; - // std::swap(ord_b[0], ord_b[1]); - // std::swap(shape_b[0], shape_b[1]); - // } - - mma16816_smem_loader b_loader(layout->wpt(1), ord_b, k_order_b, shape_b, - mma_instr_b, mat_shape_b, - per_phase_b, max_phase_b, dtsize_b, builder_, add, mul, gep); - std::vector off_b = b_loader.compute_offs(warp_n, lane); - - if(licm_ptrs) - builder_->SetInsertPoint(CurrBB); - // pointers - int num_ptr_b = b_loader.get_num_ptr(); - std::vector ptrs_b(num_ptr_b); - for(int i = 0; i < num_ptr_b; i++) - ptrs_b[i] = bit_cast(gep(shmems_[B], {off_b[i]}), smem_ptr_ty); - - - // loading function - std::function load_b; - load_b = [&](int n, int k, int inc, bool is_prefetch) { - auto [hb0, hb1, hb2, hb3] = b_loader.load_x4(k, n, inc, is_prefetch, phiB, shared_pre_ptr_[layout_b], - shared_next_ptr_[layout_b], off_b, ptrs_b, - ldmatrix_ty, smem_ptr_ty, prefetch_latch_to_bb_); - register_lds2(hb, n, k, inc, hb0, is_prefetch); - register_lds2(hb, n+1, k, inc, hb2, is_prefetch); - register_lds2(hb, n, k+1, inc, hb1, is_prefetch); - register_lds2(hb, n+1, k+1, inc, hb3, is_prefetch); - }; - - - - // create mma & unpack result, m, n, k are offsets in mat - auto call_mma = [&](unsigned m, unsigned n, unsigned k) { - InlineAsm *mma_fn = InlineAsm::get(mma_ty, layout->get_ptx_instr() + - " {$0, $1, $2, $3}," - " {$4, $5, $6, $7}," - " {$8, $9}," - " {$10, $11, $12, $13};", - "=r,=r,=r,=r,r,r,r,r,r,r,0,1,2,3", true); - unsigned cols_per_thread = num_rep_n * 2; - std::vector idx = { - (m + 0)*cols_per_thread + (n*2 + 0), - (m + 0)*cols_per_thread + (n*2 + 1), - (m + 1)*cols_per_thread + (n*2 + 0), - (m + 1)*cols_per_thread + (n*2 + 1) - }; - Value *nc = call(mma_ty, mma_fn, - {ha[{m, k}], ha[{m+1, k}], ha[{m, k+1}], ha[{m+1, k+1}], - hb[{n, k}], hb[{n, k+1}], - fc[idx[0]], fc[idx[1]], fc[idx[2]], fc[idx[3]]}); - fc[idx[0]] = extract_val(nc, std::vector{0}); - fc[idx[1]] = extract_val(nc, std::vector{1}); - fc[idx[2]] = extract_val(nc, std::vector{2}); - fc[idx[3]] = extract_val(nc, std::vector{3}); - }; - if (C->is_prefetched()) { - // create phis - builder_->SetInsertPoint(CurrBB->getFirstNonPHI()); - for(unsigned m = 0; m < num_rep_m; m++){ - ha[{2*m, 0}] = phi(phi_ty, 2); - ha[{2*m+1, 0}] = phi(phi_ty, 2); - ha[{2*m, 1}] = phi(phi_ty, 2); - ha[{2*m+1, 1}] = phi(phi_ty, 2); - } - for(unsigned n = 0; n < num_rep_n; n+=2){ - hb[{n, 0}] = phi(phi_ty, 2); - hb[{n+1, 0}] = phi(phi_ty, 2); - hb[{n, 1}] = phi(phi_ty, 2); - hb[{n+1, 1}] = phi(phi_ty, 2); - } - // insert prefetched lds at the end of loop header - builder_->SetInsertPoint(bbs_[phiA->get_incoming_block(0)]->getTerminator()); - for(unsigned m = 0; m < num_rep_m; m++) - load_a(2*m, 0, 0, true); - for(unsigned n = 0; n < num_rep_n; n+=2) - load_b(n, 0, 0, true); - // update accumulators - builder_->SetInsertPoint(CurrBB); - for(unsigned k = 0; k < num_rep_k; ++k){ // stride of instr in mat is 2 - int next_k = (k + 1) % num_rep_k; - // prefetch A - for(unsigned m = 0; m < num_rep_m; m++) - load_a(2*m, 2*next_k, 1, true); - // prefetch B - for(unsigned n = 0; n < num_rep_n; n+=2) - load_b(n, 2*next_k, 1, true); - // tensor core ops - for(unsigned m = 0; m < num_rep_m; m++) - for(unsigned n = 0; n < num_rep_n; n++){ - call_mma(2*m, n, 2*k); - } - } - } - else{ - for (unsigned k = 0; k < num_rep_k; k++) { - for (unsigned m = 0; m < num_rep_m; m++) - load_a(2*m, 2*k, 0, /*is_prefetch*/false); - for (unsigned n = 0; n < num_rep_n; n+=2) - load_b(n, 
2*k, 0, /*is_prefetch*/false); - for (unsigned m = 0; m < num_rep_m; m++) - for (unsigned n = 0; n < num_rep_n; n++) - call_mma(2*m, n, 2*k); - } - } - // write back - unsigned i = 0; - for(indices_t idx: idxs_.at(C)){ - std::vector key(idx.size() - 2); - std::copy(idx.begin() + 2, idx.end(), key.begin()); - if(i >= fcs.at(key).size()) - i = 0; - vals_[C][idx] = fcs.at(key)[i++]; - }; - -} - -/** - * \brief Code Generation for FMA-based `dot` (FP32, FP64, Default) - */ -void generator::visit_fmadot(ir::dot_inst* C, ir::value* A, ir::value* B, ir::value* D, unsigned NK, Type *c_ty, Function *f_mul_add) { - auto shape_c = C->get_type()->get_block_shapes(); - auto shape_a = A->get_type()->get_block_shapes(); - auto shape_b = B->get_type()->get_block_shapes(); - auto ord_a = layouts_->get(A)->get_order(); - auto ord_b = layouts_->get(B)->get_order(); - analysis::scanline_layout* layout_c = layouts_->get(C)->to_scanline(); - analysis::shared_layout* layout_a = (analysis::shared_layout*)layouts_->get(C->get_operand(0)); - analysis::shared_layout* layout_b = (analysis::shared_layout*)layouts_->get(C->get_operand(1)); - bool is_a_row = ord_a[0] == 1; - bool is_b_row = ord_b[0] == 1; - std::string a_trans = is_a_row ? "" : ".trans"; - std::string b_trans = is_b_row ? ".trans" : ""; - int stride_a_m = is_a_row ? shape_a[1] : 1; - int stride_a_k = is_a_row ? 1 : shape_a[0]; - int stride_b_n = is_b_row ? 1 : shape_b[0]; - int stride_b_k = is_b_row ? shape_b[1] : 1; - int stride_a0 = is_a_row ? stride_a_k : stride_a_m; - int stride_a1 = is_a_row ? stride_a_m : stride_a_k; - int stride_b0 = is_b_row ? stride_b_n : stride_b_k; - int stride_b1 = is_b_row ? stride_b_k : stride_b_n; - int lda = is_a_row ? stride_a_m : stride_a_k; - int ldb = is_b_row ? stride_b_k : stride_b_n; - int per_phase_a = swizzle_->get_per_phase(layout_a); - int max_phase_a = swizzle_->get_max_phase(layout_a); - int per_phase_b = swizzle_->get_per_phase(layout_b); - int max_phase_b = swizzle_->get_max_phase(layout_b); - int num_ptr_a = 8; - int num_ptr_b = 8; - int vec_a = 2; - int vec_b = 4; - distributed_axis ax_m = axes_.at(a_axes_->get(C, 0)); - distributed_axis ax_n = axes_.at(a_axes_->get(C, 1)); -// Value* thread = tgt_->get_local_id(mod_, *builder_, 0); - - Value* off_a0 = is_a_row ? i32(0) : mul(ax_m.thread_id, i32(ax_m.contiguous)); - Value* off_a1 = is_a_row ? mul(ax_m.thread_id, i32(ax_m.contiguous)): i32(0); - std::vector off_a(num_ptr_a); - for(int i = 0; i < num_ptr_a; i++){ -// Value* off_a0i = add(off_a0, i32(is_a_row ? vec_a : layout_c->mts(0)*vec_a)); -// off_a0i = exact_udiv(off_a0i, i32(vec_a)); -// off_a0i = xor_(off_a0i, phase_a); -// off_a0i = mul(off_a0i, i32(vec_a)); - off_a[i] = add(mul(off_a0, i32(stride_a0)), mul(off_a1, i32(stride_a1))); - } - Value* off_b0 = is_b_row ? mul(ax_n.thread_id, i32(ax_n.contiguous)): i32(0); - Value* off_b1 = is_b_row ? i32(0) : mul(ax_n.thread_id, i32(ax_n.contiguous)); - std::vector off_b(num_ptr_b); - for(int i = 0; i < num_ptr_b; i++){ -// Value* off_b0i = add(off_b0, i32(is_b_row ? 
layout_c->mts(1)*vec_b : vec_b)); -// off_b0i = exact_udiv(off_b0i, i32(vec_b)); -// off_b0i = xor_(off_b0i, phase_b); -// off_b0i = mul(off_b0i, i32(vec_b)); - off_b[i] = add(mul(off_b0, i32(stride_b0)), mul(off_b1, i32(stride_b1))); - } - std::vector ptrs_a(num_ptr_a); - for(int i = 0; i < num_ptr_a; i++) - ptrs_a[i] = gep(shmems_[A], off_a[i]); - std::vector ptrs_b(num_ptr_b); - for(int i = 0; i < num_ptr_b; i++) - ptrs_b[i] = gep(shmems_[B], off_b[i]); - - std::map ret = vals_[D]; - std::map, Value*> has, hbs; - auto ord = layout_c->get_order(); - for(unsigned k = 0; k < NK; k++){ - int z = 0; - for(unsigned i = 0; i < shape_c[ord[1]]; i += layout_c->shape_per_cta(ord[1])) - for(unsigned j = 0; j < shape_c[ord[0]]; j += layout_c->shape_per_cta(ord[0])) - for(unsigned ii = 0; ii < layout_c->nts(ord[1]); ii++) - for(unsigned jj = 0; jj < layout_c->nts(ord[0]); jj++){ - unsigned m = (ord[0] == 1) ? i : j; - unsigned n = (ord[0] == 1) ? j : i; - unsigned mm = (ord[0] == 1) ? ii : jj; - unsigned nn = (ord[0] == 1) ? jj : ii; - if(has.find({m + mm, k}) == has.end()){ - Value* pa = gep(ptrs_a[0], i32((m + mm)*stride_a_m + k*stride_a_k)); - Value* va = load(pa); - has[{m + mm, k}] = va; - } - if(hbs.find({n + nn, k}) == hbs.end()){ - Value* pb = gep(ptrs_b[0], i32((n + nn)*stride_b_n + k*stride_b_k)); - Value* vb = load(pb); - hbs[{n + nn, k}] = vb; - } - ret[idxs_[C].at(z)] = call(f_mul_add, {has[{m+mm,k}], hbs[{n+nn, k}], ret[idxs_[C].at(z)]}); - z++; - } - } - - for(indices_t idx: idxs_.at(C)){ - vals_[C][idx] = ret[idx]; - } -} - -/** - * \brief Code Generation for `dot` - * Dispatches to appropriate specialized function - */ -void generator::visit_dot_inst(ir::dot_inst* dot) { - Function *fn = builder_->GetInsertBlock()->getParent(); - Module *module = fn->getParent(); - ir::value *A = dot->get_operand(0); - ir::value *B = dot->get_operand(1); - ir::value *D = dot->get_operand(2); - Type *c_ty = cvt(D->get_type()->get_scalar_ty()); - Function *f_mul_add = Intrinsic::getDeclaration(module, Intrinsic::fmuladd, std::vector{c_ty}); - auto A_shapes = A->get_type()->get_block_shapes(); - size_t red_axis = 1; - unsigned NK = A_shapes[red_axis]; - bool is_outer = NK == 1; - bool is_mma = layouts_->get(dot)->to_mma(); - if(!is_outer && is_mma && tgt_->as_nvidia()->sm() < 80) - return visit_mma884(dot, A, B, D, NK); - if(!is_outer && is_mma && tgt_->as_nvidia()->sm() >= 80) - return visit_mma16816(dot, A, B, D, NK); // rename it as visit_mma_v2()? 
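Whichever of the three dot paths is taken, the semantics are the same: the accumulator D is combined with the product of A and B along the reduction axis of length NK. A scalar reference in plain C++ (row-major storage and the names M, N, NK, dot_reference are illustrative assumptions; the real code paths operate on per-thread register fragments, not raw arrays):

  #include <cmath>

  // C = A * B + D, computed with fused multiply-adds; this is what the FMA
  // fallback lowers to, one llvm.fmuladd call per scalar element.
  void dot_reference(const float *A, const float *B, const float *D, float *C,
                     int M, int N, int NK) {
    for (int m = 0; m < M; ++m)
      for (int n = 0; n < N; ++n) {
        float acc = D[m * N + n];                           // initial accumulator
        for (int k = 0; k < NK; ++k)
          acc = std::fma(A[m * NK + k], B[k * N + n], acc); // fused multiply-add
        C[m * N + n] = acc;
      }
  }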
- if (dot->get_type()->get_scalar_ty()->is_fp32_ty() && - A->get_type()->get_scalar_ty()->is_fp32_ty()) - return visit_fmadot(dot, A, B, D, NK, c_ty, f_mul_add); - throw std::runtime_error("dot has invalid operand type"); -} - -void generator::visit_trans_inst(ir::trans_inst* trans) { - throw std::runtime_error("not supported"); -} - -/** - * \brief Code Generation for `sqrt` - */ -void generator::visit_sqrt_inst(ir::sqrt_inst* x) { - for(indices_t idx: idxs_.at(x)){ - Value *val = vals_[x->get_operand(0)][idx]; - Value *ret = intrinsic(Intrinsic::sqrt, {val->getType()}, {val}); - vals_[x][idx] = ret; - } -} - -Value* generator::shared_off(const std::vector& shapes, const std::vector& order, indices_t idx){ - // strides - std::vector strides(shapes.size(), builder_->getInt32(0)); - strides[order[0]] = builder_->getInt32(1); - for(size_t i = 1; i < idx.size(); i++) - strides[order[i]] = builder_->CreateMul(strides[order[i-1]], builder_->getInt32(shapes[order[i-1]])); - // result - Value *result = builder_->getInt32(0); - for(size_t i = 0; i < idx.size(); i++) - result = builder_->CreateAdd(result, builder_->CreateMul(idx[i], strides[i])); - return result; -} - -inline Value* generator::shfl_sync(Value* acc, int32_t i){ - Type* ty = acc->getType(); - std::string asm_str = "shfl.sync.bfly.b32 $0, $1, $2, 0x1f, 0xffffffff;"; - InlineAsm *shfl = InlineAsm::get(FunctionType::get(ty, {ty, i32_ty}, false), asm_str, "=f,f,r", false); - if(ty->getPrimitiveSizeInBits() <= 32) - return call(shfl, {acc, i32(i)}); - acc = bit_cast(acc, vec_ty(f32_ty, 2)); - Value* acc0 = builder_->CreateExtractElement(acc, i32(0)); - Value* acc1 = builder_->CreateExtractElement(acc, i32(1)); - Value* ret = UndefValue::get(vec_ty(f32_ty, 2)); - ret = insert_elt(ret, shfl_sync(acc0, i), i32(0)); - ret = insert_elt(ret, shfl_sync(acc1, i), i32(1)); - return bit_cast(ret, ty); -} - -/** - * \brief Code Generation for `reduce` (ND case) - */ -void generator::visit_reducend_inst_fast(ir::reduce_inst* x, acc_fn_t do_acc, Value *neutral){ - ir::value *arg = x->get_operand(0); - const auto with_index = x->with_index(); - unsigned axis = x->get_axis(); - analysis::distributed_layout* layout = dynamic_cast(layouts_->get(arg)); - const auto &shapes = layout->get_shape(); - - Type* sca_ty = cvt(arg->get_type()->get_scalar_ty()); - size_t n_bits = sca_ty->getPrimitiveSizeInBits(); - std::string n_bits_str = std::to_string(n_bits); - std::string cst = (n_bits == 64) ? 
"l" : "r"; - - FunctionType *st_shared_ty = FunctionType::get(void_ty, {i1_ty, ptr_ty(sca_ty, 3), sca_ty}, false); - InlineAsm *st_shared = InlineAsm::get(st_shared_ty, "@$0 st.shared.b" + n_bits_str + " [$1], $2;", "b," + cst + "," + cst, true); - FunctionType *ld_shared_ty = FunctionType::get(sca_ty, {i1_ty, ptr_ty(sca_ty, 3)}, false); - InlineAsm *ld_shared = InlineAsm::get(ld_shared_ty, "@$1 ld.shared.b" + n_bits_str + " $0, [$2];", "=" + cst + ",b," + cst, true); - - Type *index_ty = IntegerType::get(*ctx_, 32); - FunctionType *st_shared_index_ty = - FunctionType::get(void_ty, {i1_ty, ptr_ty(index_ty, 3), index_ty}, false); - InlineAsm *st_shared_index = InlineAsm::get( - st_shared_index_ty, "@$0 st.shared.b32 [$1], $2;", "b,r,r", true); - FunctionType *ld_shared_index_ty = - FunctionType::get(index_ty, {i1_ty, ptr_ty(index_ty, 3)}, false); - InlineAsm *ld_shared_index = InlineAsm::get( - ld_shared_index_ty, "@$1 ld.shared.b32 $0, [$2];", "=r,b,r", true); - - Value* thread = tgt_->get_local_id(mod_, *builder_, 0); - Value* warp = udiv(thread, i32(32)); - Value* lane = urem(thread, i32(32)); - - unsigned shuffle_width = 0; - unsigned warps_per_inner = 0; - auto arg_vals = vals_.at(arg); - std::vector arg_idxs = idxs_.at(arg); - size_t n_elts = arg_idxs.size(); - unsigned col_per_thread = 0; - Value* warp_j = nullptr; - if (analysis::scanline_layout *scanline = layout->to_scanline()) { - std::vector order = layout->get_order(); - unsigned mts = scanline->mts(order[0]); - shuffle_width = std::min(mts, 32); - warps_per_inner = std::max(mts / 32, 1); - col_per_thread = shapes[order[0]] / mts; - warp_j = urem(warp, i32(warps_per_inner)); - } else if (layout->to_mma()) { - shuffle_width = 4; - warps_per_inner = layout->to_mma()->wpt(1); - col_per_thread = axes_.at(a_axes_->get(arg, 1)).values.size(); - warp_j = axes_.at(a_axes_->get(arg, 1)).thread_id; - } - assert(warp_j != nullptr); - - // unsigned col_per_thread = 2 * shapes[order[0]] / layout->shape_per_cta(order[0]); - // - Value *base = cast_shared_layout_ptr(layouts_->get(layouts_->tmp(x)), - cvt(x->get_type()->get_scalar_ty())); - Value *index_base = - with_index ? cast_shared_layout_ptr(layouts_->get(layouts_->tmp_index(x)), - IntegerType::get(*ctx_, 32)) - : nullptr; - - // preds - Value* is_lane0 = icmp_eq(lane, i32(0)); - Value* is_warp0 = icmp_eq(warp, i32(0)); - Value* is_thread0 = icmp_eq(thread, i32(0)); - Value* lane_j = urem(lane, i32(shuffle_width)); - if(warps_per_inner > 1) - add_barrier(); - // compute partial sum for each warp, and store to shared memory - for(size_t i = 0; i < n_elts/col_per_thread; i++){ - std::pair acc; - // reduce within thread - for(size_t j = 0; j < col_per_thread; j++){ - auto arg_idx = arg_idxs[i*col_per_thread + j]; - bool is_first = j == 0; - do_acc( - acc, [&]() -> Value * { return arg_vals[arg_idx]; }, - [&]() -> Value * { return arg_idx[axis]; }, is_first); - } - - // reduce within warp - for(int k = shuffle_width/2 ; k > 0; k >>= 1) { - do_acc( - acc, [&]() -> Value * { return shfl_sync(acc.first, k); }, - [&]() -> Value * { return shfl_sync(acc.second, k); }, false); - } - // store partial result to shared memory - auto x_idxs = idxs_[x][i]; - Value* x_idx = x_idxs.empty() ? builder_->getInt32(0) : x_idxs[0]; - // single warp on the reduce dimension -- no need to use shmem - if(warps_per_inner==1){ - vals_[x][idxs_[x][i]] = with_index ? 
acc.second : acc.first; - } - else{ - Value* st_off = add(mul(x_idx, i32(warps_per_inner)), warp_j); - call(st_shared, {icmp_eq(lane_j, i32(0)), gep(base, st_off), acc.first}); - if (with_index) { - call(st_shared_index, - {icmp_eq(lane_j, i32(0)), gep(index_base, st_off), acc.second}); - } - } - } - if(warps_per_inner==1) - return; - add_barrier(); - // at this point, partial accumulator synchronized in shared memory - // Just need to reduce `warp_per_inner` numbers in shared memory - for(size_t i = 0; i < n_elts/col_per_thread; i++){ - auto x_idxs = idxs_[x][i]; - Value* x_idx = x_idxs.empty() ? builder_->getInt32(0) : x_idxs[0]; - Value* ld_off = add(mul(x_idx, i32(warps_per_inner)), urem(lane_j, i32(warps_per_inner))); - std::pair acc; - acc.first = call(ld_shared, {builder_->getInt1(true), gep(base, ld_off)}); - acc.second = with_index ? call(ld_shared_index, {builder_->getInt1(true), - gep(index_base, ld_off)}) - : nullptr; - for (int k = warps_per_inner / 2; k > 0; k >>= 1) { - do_acc( - acc, [&]() -> Value * { return shfl_sync(acc.first, k); }, - [&]() -> Value * { return shfl_sync(acc.second, k); }, false); - } - vals_[x][idxs_[x][i]] = with_index ? acc.second : acc.first; - } - // add_barrier(); -} - - -void generator::visit_reducend_inst(ir::reduce_inst* x, acc_fn_t do_acc, Value *neutral) { - ir::value *arg = x->get_operand(0); - unsigned axis = x->get_axis(); - auto with_index = x->with_index(); - - // reduce within thread - // index-> - std::map> accs; - for(indices_t idx: idxs_.at(arg)){ - indices_t pidx = idx; - pidx[axis] = i32(0); - bool is_first = accs.find(pidx) == accs.end(); - do_acc( - accs[pidx], [&]() -> Value * { return vals_[arg][idx]; }, - [&]() -> Value * { return idx[axis]; }, is_first); - }; - - // reduce within blocks - auto *data_layout = layouts_->get(layouts_->tmp(x)); - auto *data_ptr = - cast_shared_layout_ptr(data_layout, cvt(x->get_type()->get_scalar_ty())); - auto *index_ptr = - with_index ? 
cast_shared_layout_ptr(layouts_->get(layouts_->tmp_index(x)), - IntegerType::get(*ctx_, 32)) - : data_ptr; - - auto shape = data_layout->get_shape(); - auto order = data_layout->get_order(); - Value *lane = axes_.at(a_axes_->get(arg, axis)).thread_id; - for(auto& x: accs) { - // current element being computed - std::pair acc = x.second; - indices_t write_idx = x.first; - write_idx[axis] = lane; - // shared memory write pointer - Value *write_off = shared_off(shape, order, write_idx); - Value *write_ptr = gep(data_ptr, write_off); - Value *index_write_ptr = gep(index_ptr, write_off); - // initialize shared memory - add_barrier(); - store(acc.first, write_ptr); - if (with_index) { - store(acc.second, index_write_ptr); - } - // build result - indices_t idx(write_idx.size(), i32(0)); - for(size_t i = shape[axis]/2; i > 0; i >>= 1){ - idx[axis] = i32(i); - // read pointer - Value *read_msk = icmp_ult(lane, i32(i)); - Value *read_off = select(read_msk, shared_off(shape, order, idx), i32(0)); - Value *read_ptr = gep(write_ptr, read_off); - Value *index_read_ptr = gep(index_write_ptr, read_off); - add_barrier(); - // update accumulator - do_acc( - acc, [&]() -> Value * { return load(read_ptr); }, - [&]() -> Value * { return load(index_read_ptr); }, false); - add_barrier(); - store(acc.first, write_ptr); - if (with_index) { - store(acc.second, index_write_ptr); - } - } - } - add_barrier(); - - // write back - for(indices_t idx: idxs_.at(x)){ - indices_t read_idx = idx; - read_idx.insert(read_idx.begin() + axis, i32(0)); - Value *read_off = shared_off(shape, order, read_idx); - Value *read_ptr = - with_index ? gep(index_ptr, read_off) : gep(data_ptr, read_off); - vals_[x][idx] = load(read_ptr); - }; -} - -/** - * \brief Code Generation for `reduce` (generic case) - */ -void generator::visit_reduce_inst(ir::reduce_inst* x) { - Type *ty = cvt(x->get_type()->get_scalar_ty()); - // accumulation function - ir::reduce_inst::op_t op = x->get_op(); - auto do_acc_op = [&](Value *x, Value *y) -> Value* { - switch(op){ - case ir::reduce_inst::ADD: return add(x, y); - case ir::reduce_inst::SUB: return sub(x, y); - case ir::reduce_inst::ARGUMAX: return icmp_uge(x, y); - case ir::reduce_inst::ARGUMIN: return icmp_ule(x, y); - case ir::reduce_inst::ARGMAX: return icmp_sge(x, y); - case ir::reduce_inst::ARGMIN: return icmp_sle(x, y); - case ir::reduce_inst::UMAX: return select(icmp_uge(x, y), x, y); - case ir::reduce_inst::UMIN: return select(icmp_ule(x, y), x, y); - case ir::reduce_inst::MAX: return select(icmp_sge(x, y), x, y); - case ir::reduce_inst::MIN: return select(icmp_sle(x, y), x, y); - case ir::reduce_inst::FADD: return fadd(x, y); - case ir::reduce_inst::FSUB: return fsub(x, y); - case ir::reduce_inst::ARGFMAX: return fcmp_oge(x, y); - case ir::reduce_inst::ARGFMIN: return fcmp_ole(x, y); - case ir::reduce_inst::FMAX: return max_num(x, y); - case ir::reduce_inst::FMIN: return min_num(x, y); - case ir::reduce_inst::XOR: return xor_(x, y); - - default: throw std::runtime_error("unreachable"); - } - }; - - auto do_acc = [&](std::pair &acc, - std::function load_value_fn, - std::function load_index_fn, - bool is_first) -> void { - auto *val = load_value_fn(); - if (x->with_index()) { - auto *index = load_index_fn(); - if (is_first) { - acc.first = val; - acc.second = index; - } else { - Value *ret = do_acc_op(acc.first, val); - acc.first = select(ret, acc.first, val); - acc.second = select(ret, acc.second, index); - } - } else { - acc.first = is_first ? 
val : do_acc_op(acc.first, val); - } - }; - - // neutral element - Value *neutral; - switch(op) { - case ir::reduce_inst::ADD: neutral = ConstantInt::get(ty, 0); break; - case ir::reduce_inst::SUB: neutral = ConstantInt::get(ty, 0); break; - case ir::reduce_inst::ARGUMAX: neutral = ConstantInt::get(ty, INT32_MIN); break; - case ir::reduce_inst::ARGUMIN: neutral = ConstantInt::get(ty, INT32_MAX); break; - case ir::reduce_inst::ARGMAX: neutral = ConstantInt::get(ty, INT32_MIN); break; - case ir::reduce_inst::ARGMIN: neutral = ConstantInt::get(ty, INT32_MAX); break; - case ir::reduce_inst::UMAX: neutral = ConstantInt::get(ty, 0); break; - case ir::reduce_inst::UMIN: neutral = ConstantInt::get(ty, UINT32_MAX); break; - case ir::reduce_inst::MAX: neutral = ConstantInt::get(ty, INT32_MIN); break; - case ir::reduce_inst::MIN: neutral = ConstantInt::get(ty, INT32_MAX); break; - case ir::reduce_inst::FADD: neutral = ConstantFP::get(ty, 0); break; - case ir::reduce_inst::FSUB: neutral = ConstantFP::get(ty, 0); break; - case ir::reduce_inst::ARGFMAX: neutral = ConstantFP::get(ty, -INFINITY); break; - case ir::reduce_inst::ARGFMIN: neutral = ConstantFP::get(ty, INFINITY); break; - case ir::reduce_inst::FMAX: neutral = ConstantFP::get(ty, -INFINITY); break; - case ir::reduce_inst::FMIN: neutral = ConstantFP::get(ty, INFINITY); break; - case ir::reduce_inst::XOR: neutral = ConstantInt::get(ty, 0); break; - default: throw std::runtime_error("unreachable"); - } - ir::value *arg = x->get_operand(0); - bool is_coalesced_scanline = layouts_->is_coalesced_scanline(x); - bool is_a100_mma = layouts_->is_a100_mma(x); - if (is_coalesced_scanline || is_a100_mma) - visit_reducend_inst_fast(x, do_acc, neutral); - else - visit_reducend_inst(x, do_acc, neutral); -} - -/** - * \brief Code Generation for `select` - */ -void generator::visit_select_inst(ir::select_inst* x) { - for(indices_t idx: idxs_.at(x)){ - vals_[x][idx] = select(vals_[x->get_operand(0)][idx], - vals_[x->get_operand(1)][idx], - vals_[x->get_operand(2)][idx]); - } -} - - - -void generator::visit_layout_convert(ir::value *out, ir::value *in){ - ir::block_type::block_shapes_t shape = out->get_type()->get_block_shapes(); - // pointer to temporary shared memory - Type *ty = cvt(out->get_type()->get_scalar_ty()); - - // Orders - analysis::distributed_layout* in_layout = dynamic_cast(layouts_->get(in)); - analysis::distributed_layout* out_layout = dynamic_cast(layouts_->get(out)); - Value *base; - int off = alloc_->offset(layouts_->get(layouts_->tmp(out))); - // std::cout << off << std::endl; - base = gep(shmem_, i32(off)); - base = bit_cast(base, ptr_ty(ty, 3)); - std::vector n_reps; - for(int i = 0; i < shape.size(); i++){ - int in_per_cta = in_layout->shape_per_cta(i); - int out_per_cta = out_layout->shape_per_cta(i); - int max_per_cta = std::max(in_per_cta, out_per_cta); - n_reps.push_back(shape[i]/max_per_cta); - } - std::vector> in_ax; - std::vector> out_ax; - for(int d = 0; d < shape.size(); d++){ - in_ax.push_back(axes_.at(a_axes_->get(in, d)).values); - out_ax.push_back(axes_.at(a_axes_->get(out, d)).values); - } - auto in_ord = - in_layout->to_mma() ? out_layout->get_order() : in_layout->get_order(); - auto out_ord = - out_layout->to_mma() ? in_layout->get_order() : out_layout->get_order(); - // out_ord[0] == 0 or in_order[0] == 0 means the first dimension is - // non-contiguous. in_vec can be greater than 0 only if both out_ord[0] and - // and in_ord[0] are contiguous. - int in_vec = out_ord[0] == 0 ? 1 - : in_ord[0] == 0 ? 
1 - : in_layout->contig_per_thread(in_ord[0]); - int out_vec = out_ord[0] == 0 ? 1 : out_layout->contig_per_thread(out_ord[0]); - int pad = std::max(in_vec, out_vec); - Value *in_ld = i32(shape[in_ord[0]] + pad); - Value *out_ld = i32(shape[out_ord[0]] + pad); - for(int i = 0; i < n_reps[0]; i++) - for(int j = 0; j < n_reps[1]; j++){ - int max_ii, max_jj; - add_barrier(); - max_ii = in_ax[0].size()/n_reps[0]; - max_jj = in_ax[1].size()/n_reps[1]; - for(int ii = 0; ii < max_ii; ii++) - for(int jj = 0; jj < max_jj; jj+=in_vec){ - // shared mem pointer - indices_t offs = {in_ax[0][ii], in_ax[1][jj]}; - Value *off = add(offs[out_ord[0]], mul(out_ld, offs[out_ord[1]])); - Value *ptr = gep(base, off); - // stash value to shared mem - Value* vals = UndefValue::get(vec_ty(ty, in_vec)); - for(int jjj = 0; jjj < in_vec; jjj++){ - indices_t idxs = {in_ax[0][i*max_ii + ii], - in_ax[1][j*max_jj + jj + jjj]}; - Value* val = bit_cast(vals_[in][idxs], ty); - vals = insert_elt(vals, val, jjj); - } - ptr = bit_cast(ptr, ptr_ty(vals->getType(), ptr->getType()->getPointerAddressSpace())); - store(vals, ptr); - } - add_barrier(); - max_ii = out_ax[0].size()/n_reps[0]; - max_jj = out_ax[1].size()/n_reps[1]; - for(int ii = 0; ii < max_ii; ii++) - for(int jj = 0; jj < max_jj; jj+=out_vec){ - // shared mem pointer - indices_t offs = {out_ax[0][ii], out_ax[1][jj]}; - Value *off = add(offs[out_ord[0]], mul(out_ld, offs[out_ord[1]])); - Value *ptr = gep(base, off); - ptr = bit_cast(ptr, ptr_ty(vec_ty(ty, out_vec), ptr->getType()->getPointerAddressSpace())); - // load value from shared rem - Value* vals = load(ptr); - for(int jjj = 0; jjj < out_vec; jjj++){ - indices_t idxs = {out_ax[0][i*max_ii + ii], - out_ax[1][j*max_jj + jj + jjj]}; - vals_[out][idxs] = extract_elt(vals, jjj); - } - } - - } -} - -void generator::visit_cvt_layout_inst(ir::cvt_layout_inst *rc) { - visit_layout_convert(rc, rc->get_operand(0)); -} - -void generator::visit_masked_load_async_inst(ir::masked_load_async_inst* x){ - unsigned in_vec = 1; - ir::value *arg = x->get_pointer_operand(); - analysis::shared_layout* out_layout = layouts_->get(x)->to_shared(); - analysis::scanline_layout* in_layout = layouts_->get(arg)->to_scanline(); - auto out_order = out_layout->get_order(); - auto in_order = in_layout->get_order(); - // tiles - if(out_order == in_order) - in_vec = in_layout->nts(in_order[0]); - int out_vec = swizzle_->get_vec(out_layout); - int min_vec = std::min(out_vec, in_vec); - int s = std::max(out_vec / in_vec, 1); - // - int per_phase = swizzle_->get_per_phase(out_layout); - int max_phase = swizzle_->get_max_phase(out_layout); - // - int in_ld = in_layout->get_shape()[in_order[0]] / in_layout->mts(in_order[0]); - int n_shared_1 = std::max(per_phase*max_phase / in_layout->mts(in_order[1]), 1); - int n_shared_0 = std::max(in_vec / out_vec, 1); - auto shapes = x->get_type()->get_block_shapes(); - BasicBlock* CurrBB = builder_->GetInsertBlock(); - BasicBlock* FirstBB = &CurrBB->getParent()->getEntryBlock(); - std::map, Value*> tmp; - std::vector> shared; - for(int i = 0; i < idxs_.at(arg).size(); i++){ - unsigned id = i / min_vec; - // input ptr info - int id_0 = id % (in_ld/min_vec); - int id_1 = id / (in_ld/min_vec); - int off_0 = id_0 / n_shared_0 * n_shared_0 * in_layout->mts(in_order[0]); - int off_1 = id_1 / n_shared_1 * n_shared_1 * in_layout->mts(in_order[1]); - int off = (off_1*shapes[in_order[0]] + off_0); - std::pair key = {id_1 % n_shared_1, id_0 % n_shared_0}; - if(tmp.find(key) == tmp.end()){ - if(CurrBB != FirstBB) - 
builder_->SetInsertPoint(FirstBB->getTerminator()); - indices_t idx = idxs_.at(arg).at(key.first*in_ld); - Value* phase = udiv(idx[in_order[1]], i32(per_phase)); - phase = urem(phase, i32(max_phase)); - Value* off_1 = mul(idx[in_order[1]], i32(shapes[in_order[0]])); - Value* off_0 = add(idx[in_order[0]], i32(key.second*out_vec)); - off_0 = udiv(off_0, i32(min_vec)); - off_0 = add(mul(xor_(udiv(off_0, i32(s)), phase),i32(s)), urem(off_0, i32(s))); - off_0 = mul(off_0 , i32(min_vec)); - Value* off = add(off_0, off_1); - if(CurrBB != FirstBB) - builder_->SetInsertPoint(CurrBB); - tmp[key] = gep(shmems_[x], {off}); - } - shared.push_back({tmp[key], off}); - } - size_t dtsize = x->get_type()->get_scalar_ty()->get_primitive_size_in_bits() / 8; - for(size_t i = 0; i < idxs_.at(arg).size(); i += in_vec){ - auto idx = idxs_[arg][i]; - // input ptr info - Value *ptr = vals_[arg][idx]; - size_t in_off = 0; - GetElementPtrInst *in_gep = dyn_cast(vals_[arg][idx]); - if(in_gep){ - ConstantInt* cst = dyn_cast(in_gep->idx_begin()); - in_off = cst ? cst->getValue().getSExtValue()*dtsize : 0; - ptr= cst ? in_gep->getPointerOperand() : in_gep; - } - // output ptr info - Value* out_base = shared[i].first; - int out_off = shared[i].second*dtsize; - // asm - std::string mod = (in_vec*dtsize == 16) ? ".cg" : ".ca"; -// Value* false_value = vals_[x->get_false_value_operand()][idx]; -// bool is_zero_false_value = false; -// if(Constant* cst = dyn_cast(false_value)) -// is_zero_false_value = cst->isZeroValue(); - Value* src_size = builder_->CreateSelect(vals_[x->get_mask_operand()][idx], i32(in_vec*dtsize), i32(0)); - std::string asm_str = "cp.async" + mod + ".shared.global [$0 + " + std::to_string(out_off) + "], [$1 + " + std::to_string(in_off) + "], " + std::to_string(in_vec*dtsize) + ", $2;"; - FunctionType *ty = FunctionType::get(void_ty, {out_base->getType(), ptr->getType(), builder_->getInt32Ty()}, false); - InlineAsm *iasm = InlineAsm::get(ty, asm_str, "r,l,r", true); - call(iasm, {out_base, ptr, src_size}); - } - - std::string asm_str = "cp.async.commit_group;"; - InlineAsm *iasm = InlineAsm::get(FunctionType::get(void_ty, {}), asm_str, "", true); - call(iasm); -} - -void generator::visit_copy_to_shared_inst(ir::copy_to_shared_inst* cts) { - unsigned in_vec = 1; - ir::value *arg = cts->get_operand(0); - analysis::shared_layout* out_layout = layouts_->get(cts)->to_shared(); - analysis::distributed_layout* in_layout = dynamic_cast(layouts_->get(arg)); - auto out_order = out_layout->get_order(); - auto in_order = in_layout->get_order(); - // tiles - if(out_order == in_order) - in_vec = in_layout->contig_per_thread(in_order[0]); - int out_vec = swizzle_->get_vec(out_layout); - int min_vec = std::min(out_vec, in_vec); - int s = std::max(out_vec / in_vec, 1); - // - int per_phase = swizzle_->get_per_phase(out_layout); - int max_phase = swizzle_->get_max_phase(out_layout); - // - int mts_0 = in_layout->shape_per_cta(in_order[0]) / in_layout->contig_per_thread(in_order[0]); - int mts_1 = in_layout->shape_per_cta(in_order[1]) / in_layout->contig_per_thread(in_order[1]); - if(in_layout->to_mma()){ - mts_0 = 4 * in_layout->to_mma()->wpt(in_order[0]); - mts_1 = 8 * in_layout->to_mma()->wpt(in_order[1]); - per_phase = 1; - max_phase = 8; - } - - int in_ld = in_layout->get_shape()[in_order[0]] / mts_0; - int n_shared_0 = std::max(in_vec / out_vec, 1); - int n_shared_1 = std::max(per_phase*max_phase / mts_1, 1); - if(in_layout->to_mma()){ - n_shared_0 = 8; - n_shared_1 = 1; - } - - BasicBlock* CurrBB = 
builder_->GetInsertBlock(); - BasicBlock* FirstBB = &CurrBB->getParent()->getEntryBlock(); - auto shapes = cts->get_type()->get_block_shapes(); - - - // store to shared - Value *current = nullptr; - std::map, Value*> ptrs; - for(int i = 0; i < idxs_.at(arg).size(); i++){ - auto idx = idxs_[arg][i]; - Value *in_value = vals_[arg][idx]; - if(i % min_vec == 0) - current = UndefValue::get(vec_ty(in_value->getType(), min_vec)); - current = insert_elt(current, in_value, i % min_vec); - if(i % min_vec == min_vec - 1){ - unsigned id = i / min_vec; - // input ptr info - int id_0 = id % (in_ld/min_vec); - int id_1 = id / (in_ld/min_vec); - // std::cout << id_0 << " " << id_1 << " " << in_ld << " " << std::endl; - std::pair key = {id_1 % n_shared_1, id_0 % n_shared_0}; - if(ptrs.find(key) == ptrs.end()){ - if(FirstBB->getTerminator()) - builder_->SetInsertPoint(FirstBB->getTerminator()); - else - builder_->SetInsertPoint(FirstBB); - indices_t idx = idxs_.at(arg).at(key.first*in_ld); - Value* phase = udiv(idx[in_order[1]], i32(per_phase)); - phase = urem(phase, i32(max_phase)); - Value* off_1 = mul(idx[in_order[1]], i32(shapes[in_order[0]])); - Value* off_0 = add(idx[in_order[0]], i32(key.second*out_vec)); - off_0 = udiv(off_0, i32(min_vec)); - off_0 = add(mul(xor_(udiv(off_0, i32(s)), phase),i32(s)), urem(off_0, i32(s))); - off_0 = mul(off_0 , i32(min_vec)); - Value* off = add(off_0, off_1); - builder_->SetInsertPoint(CurrBB); - ptrs[key] = gep(shmems_.at(cts), {off}); - } - int off_0 = id_0 / n_shared_0 * n_shared_0 * mts_0; - int off_1 = id_1 / n_shared_1 * n_shared_1 * mts_1; - if(in_layout->to_mma()){ - off_0 = id_0/n_shared_0*n_shared_0*8; - off_1 = id_1/n_shared_1*n_shared_1*8; - } - int off = (off_1*shapes[in_order[0]] + off_0); - Value* ptr = gep(ptrs[key], {i32(off)}); - ptr = bit_cast(ptr, current->getType()->getPointerTo(3)); - // asm - store(current, ptr); - } - }; -} - -void generator::visit_copy_from_shared_inst(ir::copy_from_shared_inst*) { - throw std::runtime_error("TODO"); -} - -Instruction* generator::add_barrier() { - Module *module = builder_->GetInsertBlock()->getModule(); - return tgt_->add_barrier(module, *builder_); -} - -void generator::visit_barrier_inst(ir::barrier_inst*) { - add_barrier(); -} - -void generator::visit_clock_inst(ir::clock_inst* clock){ - InlineAsm *iasm = InlineAsm::get(FunctionType::get(builder_->getInt64Ty(), {}), "mov.u64 $0, %clock64;", "=l", true); - vals_[clock][{}] = call(iasm); -} - -void generator::visit_globaltimer_inst(ir::globaltimer_inst* timer){ - InlineAsm *iasm = InlineAsm::get(FunctionType::get(builder_->getInt64Ty(), {}), "mov.u64 $0, %globaltimer;", "=l", true); - vals_[timer][{}] = call(iasm); -} - - - -void generator::visit_prefetch_s_inst(ir::prefetch_s_inst *i) { - ir::value *v = i->get_operand(0); - int inc = i->get_inc(); - if (inc == 0) { - // If dot has not been visitied, do nothing. 
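Before the else branch of the prefetch visitor continues, a brief aside on the two visitors above: visit_masked_load_async_inst and visit_copy_to_shared_inst both stage tiles into shared memory, and the async variant emits the cp.async sequence shown in its asm strings. A hedged CUDA sketch of that sequence (sm_80 or newer assumed; the wrapper name copy_async_16B and __cvta_generic_to_shared are illustrative only):

  __device__ void copy_async_16B(void *smem_dst, const void *gmem_src, bool pred) {
    unsigned dst = static_cast<unsigned>(__cvta_generic_to_shared(smem_dst));
    int src_size = pred ? 16 : 0;  // masked-off lanes copy 0 bytes and get zero-fill
    asm volatile("cp.async.cg.shared.global [%0], [%1], 16, %2;"
                 :: "r"(dst), "l"(gmem_src), "r"(src_size));
  }
  // After issuing a batch of copies:
  //   asm volatile("cp.async.commit_group;");
  //   asm volatile("cp.async.wait_group 0;");  // or N, to leave N groups in flight

This mirrors the predicated src-size trick used above, where the mask selects between in_vec*dtsize and 0 instead of branching around the copy.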
- } else { - // If dot has been visitied, insert prefetched lds - assert(inc == 1); - assert(prefetch_latch_to_bb_.find(v) != prefetch_latch_to_bb_.end() && - "dot hasn't be visited"); - // sink lds & extract element - // move lds & all uses to current location - std::stack work_stack; - for (Value *value : prefetch_latch_to_bb_[v]) - work_stack.push(value); - std::vector dead_instrs; - while (!work_stack.empty()) { - Value *m = work_stack.top(); - work_stack.pop(); - - for (auto u : m->users()) - work_stack.push(u); - - assert(isa(m)); - auto m_instr = static_cast(m); - - m_instr->removeFromParent(); - m_instr->insertAfter(&*std::prev(builder_->GetInsertBlock()->end())); - assert(m_instr->getParent() == &*builder_->GetInsertBlock()); - builder_->SetInsertPoint(m_instr->getParent()); - } - } -} - -void generator::visit_async_wait_inst(ir::async_wait_inst* i) { - std::string asm_str = "cp.async.wait_group " + std::to_string(i->get_N()) + ";"; - InlineAsm *iasm = InlineAsm::get(FunctionType::get(void_ty, {}), asm_str, "", true); - call(iasm); -} - -/** - * \brief Code Generation for `extern_elementwise` - */ -void generator::visit_extern_elementwise_inst(ir::extern_elementwise_inst *i) { - std::vector operand_types; - for (size_t j = 0; j < i->get_num_operands(); j++) { - operand_types.push_back( - cvt(i->get_operand(j)->get_type()->get_scalar_ty())); - } - Type *ret_type = cvt(i->get_type()->get_scalar_ty()); - FunctionType *FT = - FunctionType::get(ret_type, std::move(operand_types), false); - Function *F = llvm::cast( - mod_->getOrInsertFunction(i->get_symbol_name(), FT).getCallee()); - for (auto idx : idxs_.at(i)) { - std::vector args; - for (size_t j = 0; j < i->get_num_operands(); j++) { - args.emplace_back(vals_[i->get_operand(j)][idx]); - } - vals_[i][idx] = call(F, std::move(args)); - } - add_extern_lib(i->get_lib_name(), i->get_lib_path()); -} - -//void generator::visit_make_range_dyn(ir::make_range_dyn* x) { -// for(indices_t idx: idxs_.at(x)){ -// assert(idx.size() == 1); -// if(idx[0] == i32(0)) -// vals_[x][idx] = idx[0]; -// else{ -// BinaryOperator *bin_add = dyn_cast(idx[0]); -// assert(bin_add); -// vals_[x][idx] = bin_add->getOperand(0); -// } -// } -//} - -//void generator::visit_make_range_sta(ir::make_range_sta* x) { -// for(indices_t idx: idxs_.at(x)){ -// assert(idx.size() == 1); -// if(idx[0] == i32(0)){ -// vals_[x][idx] = idx[0]; -// } -// else{ -// BinaryOperator *bin_add = dyn_cast(idx[0]); -// assert(bin_add); -// Value *cst = bin_add->getOperand(1); -// assert(isa(cst)); -// vals_[x][idx] = cst; -// } -// }; -//} - -void generator::visit_make_range(ir::make_range* x) { - for(indices_t idx: idxs_.at(x)){ - Value* start = ConstantInt::get(idx[0]->getType(), x->get_first()->get_value()); - vals_[x][idx] = add(start, idx[0]); - } -} - -void generator::visit_undef_value(ir::undef_value *x) { - ir::type* sca_ty = x->get_type()->get_scalar_ty(); - Type* ty = cvt(sca_ty); - for(indices_t idx: idxs_.at(x)) - vals_[x][idx] = llvm::UndefValue::get(ty); -} - -void generator::visit_constant_int(ir::constant_int *x){ - Type *ty = cvt(x->get_type()->get_scalar_ty()); - for(indices_t idx: idxs_.at(x)) - vals_[x][idx] = ConstantInt::get(ty, x->get_value()); -} - -void generator::visit_constant_fp(ir::constant_fp *x){ - Type *ty = cvt(x->get_type()->get_scalar_ty()); - for(indices_t idx: idxs_.at(x)) { - // manually select bf16 constant - if (x->get_type()->get_scalar_ty()->is_bf16_ty()) { - // highest 16 bits of fp32 - float fp32_value = x->get_value(); - uint16_t bf16_raw = 
(*reinterpret_cast(&fp32_value) - & 0xffff0000) >> 16; - std::stringstream const_str; - const_str << "0x" << std::hex << bf16_raw << "U"; // unsigned - InlineAsm *bf16_const = InlineAsm::get(FunctionType::get(bf16_ty, {}, false), - " mov.b16 $0, " + const_str.str() + ";", - "=h", false); - vals_[x][idx] = builder_->CreateCall(bf16_const, {}); - } else - vals_[x][idx] = ConstantFP::get(ty, x->get_value()); - } -} - -void generator::visit_alloc_const(ir::alloc_const *alloc) { - unsigned size = ((ir::constant_int*)alloc->get_operand(0))->get_value(); - Type *element_ty = cvt(alloc->get_type()->get_pointer_element_ty()); - Type *array_ty = llvm::ArrayType::get(element_ty, size); - Value *array = new llvm::GlobalVariable(*mod_, array_ty, false, llvm::GlobalVariable::ExternalLinkage, - nullptr, alloc->get_name(), nullptr, llvm::GlobalVariable::NotThreadLocal, 4); - vals_[alloc][{}] = bit_cast(array, element_ty->getPointerTo(4)); -} - - -void generator::forward_declare(ir::function* fn){ - FunctionType *fn_ty = (FunctionType*)cvt(fn->get_fn_type()); - if(!tgt_->is_gpu()){ - Type *fn_ret_ty = fn_ty->getReturnType(); - std::vector fn_args_ty; - for(unsigned i = 0; i < fn_ty->getNumParams(); i++) - fn_args_ty.push_back(fn_ty->getParamType(i)); - fn_args_ty.push_back(i32_ty); - fn_args_ty.push_back(i32_ty); - fn_args_ty.push_back(i32_ty); - fn_ty = FunctionType::get(fn_ret_ty, fn_args_ty, false); - } - Function *ret = Function::Create(fn_ty, Function::ExternalLinkage, fn->get_name(), mod_); - fns_[fn] = ret; -} - -Value *generator::cast_shared_layout_ptr(analysis::data_layout *layout, - Type *ty) { - unsigned addr_space = shmem_->getType()->getPointerAddressSpace(); - Value *base = bit_cast(shared_ptr_.at(layout), ptr_ty(ty, addr_space)); - return base; -} - -void generator::visit_function(ir::function* fn) { - idxs_.clear(); - vals_.clear(); - seen_.clear(); - LLVMContext &ctx = builder_->getContext(); - - Function* ret = fns_[fn]; - - - // set attributes - for(auto attr_pair: fn->attrs()){ - unsigned id = attr_pair.first; - for(ir::attribute attr: attr_pair.second) - if(attr.is_llvm_attr()){ - llvm::Attribute llattr = cvt(attr); - if(llattr.getKindAsEnum() != llvm::Attribute::None) - ret->addAttribute(id, cvt(attr)); - } - } - // set metadata - if(tgt_->is_gpu()){ - tgt_->set_kernel(*builder_, ctx, mod_, ret); - Metadata *md_args[] = { - ValueAsMetadata::get(ret), - MDString::get(ctx, "maxntidx"), - ValueAsMetadata::get(i32(num_warps_*32)) - }; - mod_->getOrInsertNamedMetadata("nvvm.annotations")->addOperand(MDNode::get(ctx, md_args)); - } - // set arguments - for(unsigned i = 0; i < fn->args().size(); i++) - vals_[fn->args()[i]][{}] = &*(ret->arg_begin() + i); - // create blocks - auto blocks = ir::cfg::reverse_post_order(fn); - for(ir::basic_block *block: blocks) { - BasicBlock *dst_block = BasicBlock::Create(ctx, block->get_name(), ret); - bbs_[block] = dst_block; - } - builder_->SetInsertPoint(bbs_[fn->blocks()[0]]); - // create policies - if(tgt_->as_nvidia()->sm() >= 80) - for(ir::load_inst::EVICTION_POLICY evict: {ir::load_inst::EVICT_FIRST, ir::load_inst::EVICT_LAST}){ - std::string policy = (evict == ir::load_inst::EVICT_FIRST) ? 
"evict_first" : "evict_last"; - std::string asm_str = "createpolicy.fractional.L2::" + policy + ".b64 $0, 1.0;"; - InlineAsm* iasm = InlineAsm::get(FunctionType::get(i64_ty, {}), asm_str, "=l", false); - policies_[evict] = call(iasm); - } - // initialize layouts - for(auto x: layouts_->get_all()){ - visit_layout(x.second); - } - // generate LLVM-IR code - for(ir::basic_block *block: blocks) - visit_basic_block(block); - // finalize - finalize_function(fn); -} - - - -void generator::visit_layout_mma(analysis::mma_layout* layout) { - ir::value *a = nullptr; - ir::value *b = nullptr; - for(ir::value* v: layout->get_values()) - if(ir::dot_inst* dot = dynamic_cast(v)){ - a = dot->get_operand(0); - b = dot->get_operand(1); - } - analysis::data_layout* layout_a = layouts_->get(a); - analysis::data_layout* layout_b = layouts_->get(b); - - const auto& shape = layout->get_shape(); - Value *_1 = i32(1); - Value *_2 = i32(2); - Value *_3 = i32(3); - Value *_4 = i32(4); - Value *_8 = i32(8); - Value *_16 = i32(16); - Value *_32 = i32(32); - int cc = tgt_->as_nvidia()->sm(); - std::vector idx_m; - std::vector idx_n; - std::vector idx_z; - // - Value* thread = tgt_->get_local_id(mod_, *builder_, 0); - Value *lane = urem(thread, _32); - Value *warp = udiv(thread, _32); - /* lane offset */ - if(cc < 80){ - auto ord_a = layout_a->get_order(); - auto ord_b = layout_b->get_order(); - bool is_a_row = ord_a[0] != 0; - bool is_b_row = ord_b[0] != 0; - /* warp offset */ - Value *warp_0 = urem(warp, i32(layout->wpt(0))); - Value *warp_12 = udiv(warp, i32(layout->wpt(0))); - Value *warp_1 = urem(warp_12, i32(layout->wpt(1))); - Value *off_warp_m = mul(warp_0, i32(layout->spw(0))); - Value *off_warp_n = mul(warp_1, i32(layout->spw(1))); - // Quad offset - Value *off_quad_m = mul(udiv(and_(lane, _16), _4), i32(layout->fpw(0))); - Value *off_quad_n = mul(udiv(and_(lane, _16), _4), i32(layout->fpw(1))); - // Pair offset - Value *off_pair_m = udiv(urem(lane, _16), _4); - off_pair_m = urem(off_pair_m, i32(layout->fpw(0))); - off_pair_m = mul(off_pair_m, i32(4)); - Value *off_pair_n = udiv(urem(lane, _16), _4); - off_pair_n = udiv(off_pair_n, i32(layout->fpw(0))); - off_pair_n = urem(off_pair_n, i32(layout->fpw(1))); - off_pair_n = mul(off_pair_n, i32(4)); - // scale - off_pair_m = mul(off_pair_m, i32(layout->rep(0)/2)); - off_quad_m = mul(off_quad_m, i32(layout->rep(0)/2)); - off_pair_n = mul(off_pair_n, i32(layout->rep(1)/2)); - off_quad_n = mul(off_quad_n, i32(layout->rep(1)/2)); - // Quad pair offset - Value *off_lane_m = add(off_pair_m, off_quad_m); - Value *off_lane_n = add(off_pair_n, off_quad_n); - // a offset - offset_a_m_[layout] = add(off_warp_m, off_lane_m); - offset_a_k_[layout] = and_(lane, _3); - // b offsets - offset_b_n_[layout] = add(off_warp_n, off_lane_n); - offset_b_k_[layout] = and_(lane, _3); - // i indices - Value *offset_c_m = add(and_(lane, _1), offset_a_m_[layout]); - for(unsigned m = 0; m < shape[0]; m+=layout->shape_per_cta(0)) - for(unsigned mm = 0; mm < layout->rep(0); mm++) - idx_m.push_back(add(offset_c_m, i32(m + mm*2))); - // j indices - Value *offset_c_n = add(and_(lane, _2), add(off_warp_n, off_pair_n)); - for(unsigned n = 0; n < shape[1]; n+=layout->shape_per_cta(1)) - for(unsigned nn = 0; nn < layout->rep(1); nn++){ - idx_n.push_back(add(offset_c_n, i32(n + nn/2*4 + (nn%2)*2*layout->fpw(1)*layout->rep(1)))); - idx_n.push_back(add(offset_c_n, i32(n + nn/2*4 + (nn%2)*2*layout->fpw(1)*layout->rep(1) + 1))); - } - if(is_a_row){ - offset_a_m_[layout] = add(offset_a_m_[layout], 
urem(thread, i32(4))); - offset_a_k_[layout] = i32(0); - } - if(!is_b_row){ - offset_b_n_[layout] = add(offset_b_n_[layout], urem(thread, i32(4))); - offset_b_k_[layout] = i32(0); - } - /* axes */ - axes_[layout->get_axis(0)] = distributed_axis{1, idx_m, warp_0}; - axes_[layout->get_axis(1)] = distributed_axis{1, idx_n, warp_1}; - } - else{ - /* warp offset */ - Value *warp_0 = urem(warp, i32(layout->wpt(0))); - Value *warp_1 = urem(udiv(warp, i32(layout->wpt(0))), i32(layout->wpt(1))); - Value *off_warp_m = mul(warp_0, i32(layout->spw(0))); - Value *off_warp_n = mul(warp_1, i32(layout->spw(1))); - Value *off_lane_m = urem(lane, _16); - Value *off_lane_n = urem(lane, _8); - /* offsets */ - // a offset - offset_a_m_[layout] = add(off_warp_m, off_lane_m); - offset_a_k_[layout] = i32(0); - // b offsets - offset_b_n_[layout] = add(off_warp_n, off_lane_n); - offset_b_k_[layout] = i32(0); - // c offset - Value *off_c_m = add(udiv(lane, _4), off_warp_m); - Value *off_c_n = add(mul(_2, urem(lane, _4)), off_warp_n); - for(unsigned m = 0; m < shape[0]; m+=layout->shape_per_cta(0)){ - idx_m.push_back(add(off_c_m, i32(m))); - idx_m.push_back(add(off_c_m, i32(m + 8))); - } - for(unsigned n = 0; n < shape[1]; n+=layout->shape_per_cta(1)){ - idx_n.push_back(add(off_c_n, i32(n))); - idx_n.push_back(add(off_c_n, i32(n + 1))); - } - /* axes */ - axes_[layout->get_axis(0)] = distributed_axis{1, idx_m, warp_0}; - axes_[layout->get_axis(1)] = distributed_axis{1, idx_n, warp_1}; - } -} - -void generator::visit_layout_scanline(analysis::scanline_layout* layout) { - Value* thread_id = tgt_->get_local_id(mod_, *builder_, 0); - auto order = layout->get_order(); - const auto& shape = layout->get_shape(); - // Delinearize - size_t dim = shape.size(); - std::vector thread_ids(dim); - for(unsigned k = 0; k < dim - 1; k++){ - Constant *dim_k = i32(layout->mts(order[k])); - Value *rem = urem(thread_id, dim_k); - thread_id = udiv(thread_id, dim_k); - thread_ids[order[k]] = rem; - } - Constant *dim_k = i32(layout->mts(order[dim - 1])); - thread_ids[order[dim - 1]] = urem(thread_id, dim_k); - - // Create axes - for(unsigned k = 0; k < dim; k++) { - int nts = layout->nts(k); - int mts = layout->mts(k); - std::string str_k = std::to_string(k); - Value *contiguous_k = i32(nts); - Value *scaled_thread_ids = mul(thread_ids[k], contiguous_k); - unsigned per_cta = layout->shape_per_cta(k); - unsigned per_thread = nts * shape[k] / per_cta; - std::vector idx_list(per_thread); - for(unsigned n = 0 ; n < per_thread; n++){ - unsigned offset = n / nts * per_cta + n % nts; - idx_list[n] = add(scaled_thread_ids, i32(offset), "idx_" + str_k + "_" + std::to_string(n)); - } - axes_[layout->get_axis(k)] = distributed_axis{nts, idx_list, thread_ids[k]}; - } -} - -void generator::visit_layout_shared(analysis::shared_layout* layout) { - Type* ty = cvt(layout->get_type()); - PointerType *ptr_ty = ty->getPointerTo(shmem_->getType()->getPointerAddressSpace()); - if (layout->get_N_buffer()) { - // create pointers - shared_pre_ptr_[layout] = gep(shmem_, i32(alloc_->offset(layout))); - shared_pre_ptr_[layout] = bit_cast(shared_pre_ptr_[layout], ptr_ty); - - BasicBlock *current = builder_->GetInsertBlock(); - - auto info = *layout->get_N_buffer(); - ir::phi_node *phi = info.phi; - BasicBlock *parent = bbs_.at(phi->get_parent()); - if(parent->empty()) - builder_->SetInsertPoint(parent); - else if (const Instruction *first_non_phi = &*parent->getFirstNonPHI()) { - builder_->SetInsertPoint(&*parent->getFirstNonPHI()); - } else - 
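// A minimal sketch of the scanline-layout delinearization done in
// visit_layout_scanline above, with plain integers instead of IR values:
// a linear thread id is split into one coordinate per dimension, walking the
// dimensions in `order` (fastest-varying first). Names are illustrative only.
#include <cstdio>
#include <vector>

static std::vector<unsigned> delinearize(unsigned tid,
                                         const std::vector<unsigned> &mts,   // threads per dim
                                         const std::vector<unsigned> &order) {
  std::vector<unsigned> coords(mts.size());
  for (size_t k = 0; k + 1 < order.size(); ++k) {
    coords[order[k]] = tid % mts[order[k]];
    tid /= mts[order[k]];
  }
  coords[order.back()] = tid % mts[order.back()];
  return coords;
}

int main() {
  // 32 threads arranged as 4 (dim 0) x 8 (dim 1), with dim 1 fastest-varying.
  std::vector<unsigned> c = delinearize(13, {4, 8}, {1, 0});
  std::printf("thread 13 -> (%u, %u)\n", c[0], c[1]); // (1, 5): 13 = 5 + 8*1
  return 0;
}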
builder_->SetInsertPoint(parent); - - // create smem_idx - read_smem_idx_[layout] = phi(i32_ty, 2); - write_smem_idx_[layout] = phi(i32_ty, 2); - - // create pointers - // ptr of the current iteration - shared_ptr_[layout] = phi(ptr_ty, 2); - // ptr of the next iteration - shared_next_ptr_[layout] = phi(ptr_ty, 2); - - builder_->SetInsertPoint(current); - } else if(layout->get_double_buffer()) { - BasicBlock *current = builder_->GetInsertBlock(); - auto info = *layout->get_double_buffer(); - ir::phi_node *phi = info.phi; - BasicBlock *parent = bbs_.at(phi->get_parent()); - if(parent->empty()) - builder_->SetInsertPoint(parent); - else - builder_->SetInsertPoint(&*parent->getFirstNonPHI()); - // create pointers - shared_ptr_[layout] = phi(ptr_ty, 2); - shared_pre_ptr_[layout] = gep(shmem_, i32(alloc_->offset(layout))); - shared_pre_ptr_[layout] = bit_cast(shared_pre_ptr_[layout], shared_ptr_[layout]->getType()); - shared_off_[layout] = phi(i32_ty, 2); - shared_next_ptr_[layout] = gep(shared_ptr_[layout], shared_off_[layout], "next_ptr"); - builder_->SetInsertPoint(current); - } else{ - size_t offset = alloc_->offset(layout); - shared_ptr_[layout] = gep(shmem_, i32(offset)); - shared_ptr_[layout] = bit_cast(shared_ptr_[layout], ptr_ty); - } -} - -void generator::visit_basic_block(ir::basic_block * block) { - - BasicBlock *parent = bbs_[block]; - builder_->SetInsertPoint(parent); - for(ir::instruction *i: block->get_inst_list()){ - visit_value(i); - // std::cout << "done" << std::endl; - } - // Update ir bb -> llvm bb mapping - bbs_[block] = builder_->GetInsertBlock(); -} - -void generator::visit_argument(ir::argument* arg) { - -} - -void generator::init_idx(ir::value *v) { - idxs_[v].clear(); - if(!v->get_type()->is_block_ty()){ - idxs_[v].push_back({}); - return; - } - if(layouts_->get(v)->to_shared()) - return; - const auto &shapes = v->get_type()->get_block_shapes(); - size_t rank = shapes.size(); - std::vector axes(rank); - std::vector ord(rank); - // compute axes - // std::cout << "axes" << std::endl; - for(size_t d = 0; d < shapes.size(); d++){ - // std::cout << d << " " << shapes[d] << std::endl; - // std::cout << a_axes_->get(v, d) << std::endl; - if(shapes[d] > 1){ - unsigned x = a_axes_->get(v, d); - axes[d] = axes_.at(x); - } - else{ - axes[d].contiguous = 1; - axes[d].values = {i32(0)}; - } - } - // std::cout << "axes ok" << std::endl; - // compute order - analysis::data_layout* layout = layouts_->get(v); - std::iota(ord.begin(), ord.end(), 0); - auto cmp = [&](int x, int y) { - unsigned axx = a_axes_->get(v, x); - unsigned axy = a_axes_->get(v, y); - size_t posx = layout->find_axis(axx); - size_t posy = layout->find_axis(axy); - if(posx < rank && posy < rank) - return layout->get_order(posx) < layout->get_order(posy); - return false; - }; - std::sort(ord.begin(), ord.end(), cmp); - ords_[v] = ord; - // indices - if(axes.size() == 1) - for(Value* x0: axes[ord[0]].values){ - idxs_[v].push_back({x0}); - } - if(axes.size() == 2) - for(Value* x1: axes[ord[1]].values) - for(Value* x0: axes[ord[0]].values){ - indices_t idx(2); - idx[ord[0]] = x0; - idx[ord[1]] = x1; - idxs_[v].push_back(idx); - } - if(axes.size() == 3) - for(Value* x2: axes[ord[2]].values) - for(Value* x1: axes[ord[1]].values) - for(Value* x0: axes[ord[0]].values){ - indices_t idx(3); - idx[ord[0]] = x0; - idx[ord[1]] = x1; - idx[ord[2]] = x2; - idxs_[v].push_back(idx); - } -} - -void generator::finalize_shared_layout(analysis::shared_layout *shared) { - if (auto n_buffer = shared->get_N_buffer()) { - // if (*_smem_idx 
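// A minimal sketch of the index enumeration performed by init_idx above for the
// rank-2 case: one index tuple per element owned by a thread, formed as the
// Cartesian product of the per-axis value lists, iterated fastest along the
// most-contiguous axis given by `ord`. Plain ints stand in for IR values.
#include <cstdio>
#include <vector>

using Index = std::vector<int>;

static std::vector<Index> cartesian(const std::vector<std::vector<int>> &axes,
                                    const std::vector<int> &ord) {
  std::vector<Index> out;
  for (int x1 : axes[ord[1]])        // slow axis outermost
    for (int x0 : axes[ord[0]]) {    // fast axis innermost
      Index idx(2);
      idx[ord[0]] = x0;
      idx[ord[1]] = x1;
      out.push_back(idx);
    }
  return out;
}

int main() {
  std::vector<std::vector<int>> axes = {{0, 1}, {0, 4}}; // per-axis offsets
  for (const Index &idx : cartesian(axes, {1, 0}))       // axis 1 is most contiguous
    std::printf("(%d, %d)\n", idx[0], idx[1]);
  return 0;
}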
== #stages-1) { - // *_smem_idx = 0; - // } else *_smem_idx++; - auto finalize_smem_idx = [&](auto &smem_idx, int init_stage) { - // insert point - Value *idx = smem_idx[shared]; - builder_->SetInsertPoint(bbs_.at(n_buffer->phi->get_parent())->getTerminator()); - Value *cond = icmp_eq(idx, i32(shared->get_num_stages()-1)); - PHINode *_ret = phi(i32_ty, 2); - Instruction *then_term = nullptr; - Instruction *else_term = nullptr; - Instruction *dummy = builder_->CreateRet(nullptr); - llvm::SplitBlockAndInsertIfThenElse(cond, _ret, &then_term, &else_term, nullptr); - dummy->removeFromParent(); - builder_->SetInsertPoint(then_term); - Value *zero_smem_idx = i32(0); - builder_->SetInsertPoint(else_term); - Value *inc_smem_idx = add(idx, i32(1)); - builder_->SetInsertPoint(_ret->getParent()); - _ret->addIncoming(zero_smem_idx, then_term->getParent()); - _ret->addIncoming(inc_smem_idx, else_term->getParent()); - // update ir::bb -> llvm::bb mapping - bbs_.at(n_buffer->phi->get_parent()) = builder_->GetInsertBlock(); - // idx = init_stage; - // loop: ... - if (auto idx_phi = llvm::dyn_cast(smem_idx[shared])) { - idx_phi->addIncoming(i32(init_stage), bbs_.at(n_buffer->phi->get_incoming_block(0))); - idx_phi->addIncoming(_ret, bbs_.at(n_buffer->phi->get_incoming_block(1))); - } else - throw std::runtime_error("Should be PHINode"); - }; - - // read_smem_idx is used by next_ptr to compute the next iteration value, so init value is 2 - finalize_smem_idx(read_smem_idx_, 2); - finalize_smem_idx(write_smem_idx_, shared->get_num_stages()-1); - - // finalize pointers - ir::phi_node *pn = n_buffer->phi; - BasicBlock *header = bbs_.at(pn->get_incoming_block(0)); - BasicBlock *loop = bbs_.at(pn->get_incoming_block(1)); - // %curr_ptr = phi %shared_pre_ptr, %next_ptr - // %next_ptr = phi %shared_pre_ptr[+1], (gep(%pre_ptr, read_smem_idx*per_stage_size)) - if (auto curr_ptr = dyn_cast(shared_ptr_[shared])) { - curr_ptr->addIncoming(shared_pre_ptr_[shared], header); - curr_ptr->addIncoming(shared_next_ptr_[shared], loop); - } else - throw std::runtime_error("Should be PHINode"); - - BasicBlock *current = builder_->GetInsertBlock(); - builder_->SetInsertPoint(header->getTerminator()); - Value *next_ptr_header = gep(shared_pre_ptr_[shared], i32(shared->get_per_stage_elements())); - builder_->SetInsertPoint(current->getTerminator()); - - assert(isa(shared_next_ptr_[shared])); - static_cast(shared_next_ptr_[shared])->addIncoming(next_ptr_header, header); - - Value *lds_offset = mul(read_smem_idx_[shared], i32(shared->get_per_stage_elements())); - Value *next_ptr = gep(shared_pre_ptr_[shared], lds_offset); - static_cast(shared_next_ptr_[shared])->addIncoming(next_ptr, loop); - } else if(shared->get_double_buffer()) { - auto info = *shared->get_double_buffer(); - ir::phi_node *phi = info.phi; - PHINode *ptr = (PHINode*)shmems_[phi]; - PHINode *offset = (PHINode*)shoffs_[phi]; - for(unsigned n = 0; n < phi->get_num_incoming(); n++){ - ir::basic_block* inc_block = phi->get_incoming_block(n); - ir::value* inc_val = phi->get_incoming_value(n); - BasicBlock *llvm_inc_block = bbs_.at(inc_block); - if(inc_val == info.latch){ - builder_->SetInsertPoint(llvm_inc_block->getTerminator()); - Value *next_offset = neg(offset); - offset->addIncoming(next_offset, llvm_inc_block); - } - else { - unsigned num_bytes = shared->get_type()->get_primitive_size_in_bits() / 8; - offset->addIncoming(i32(shared->get_size() / (2*num_bytes)), llvm_inc_block); - } - ptr->addIncoming(shmems_[inc_val], llvm_inc_block); - } - } -} - -void 
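// A minimal sketch of the control flow that finalize_smem_idx builds in IR:
// the shared-memory stage index is just a round-robin counter that wraps at
// num_stages, equivalent to the scalar logic below (helper name is illustrative).
#include <cstdio>

static int next_stage(int idx, int num_stages) {
  // if (idx == num_stages - 1) idx = 0; else ++idx;
  return (idx == num_stages - 1) ? 0 : idx + 1;
}

int main() {
  int num_stages = 3;
  int idx = 2;                                // read_smem_idx starts at 2 above
  for (int iter = 0; iter < 5; ++iter) {
    std::printf("iter %d: stage %d\n", iter, idx);
    idx = next_stage(idx, num_stages);        // 2, 0, 1, 2, 0
  }
  return 0;
}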
generator::finalize_function(ir::function *fn) { - // finalize double-buffering - for(const auto& x: layouts_->get_all()) - if(auto *shared = dynamic_cast(x.second)) - finalize_shared_layout(shared); - // finalize phi - for(ir::basic_block *block: fn->blocks()) - for(ir::instruction *inst: block->get_inst_list()) - if(auto *phi = dynamic_cast(inst)) - finalize_phi_node(phi); - for(auto& x: lazy_phi_incs_) - std::get<0>(x)->addIncoming(std::get<1>(x), bbs_[std::get<2>(x)]); -} - -void generator::finalize_phi_node(ir::phi_node *x) { - if(shmems_.find(x) != shmems_.end()) - return; - for(unsigned n = 0; n < x->get_num_incoming(); n++){ - ir::basic_block *_block = x->get_incoming_block(n); - BasicBlock *block = bbs_.at(_block); - for(indices_t idx: idxs_.at(x)){ - PHINode *phi = (PHINode*)vals_[x][idx]; - Value *inc = vals_[x->get_incoming_value(n)][idx]; - // x->print(std::cout); - phi->addIncoming(inc, block); - } - } -} - -void generator::packed_type(ir::value* i){ - Type* dtype = cvt(i->get_type()->get_tile_element_ty()); - auto* layout = dynamic_cast(layouts_->get(i)); - assert(layout); -} - -void generator::visit(ir::module &src, llvm::Module &dst) { - mod_ = &dst; - ctx_ = &dst.getContext(); - builder_ = new Builder(*ctx_); - // allocate shared memory - if(tgt_->is_gpu()) - if(unsigned alloc_size = alloc_->allocated_size()){ - Type *int_8_ty = Type::getInt8Ty(*ctx_); - Type *int_32_ty = Type::getInt32Ty(*ctx_); - ArrayType *array_ty = ArrayType::get(int_32_ty, 0); - Type *ptr_ty = ptr_ty(int_8_ty, 3); - GlobalVariable *sh_mem_array = - new GlobalVariable(*mod_, array_ty, false, GlobalVariable::ExternalLinkage, - nullptr, "__shared_ptr", nullptr, GlobalVariable::NotThreadLocal, 3); - shmem_ = bit_cast(sh_mem_array, ptr_ty); - } - // instantiate device functions -// for(ir::function *fn: src.get_function_list()) -// for(ir::basic_block *bb: fn->blocks()) -// for(ir::instruction *i: bb->get_inst_list()) -// if(auto *call = dynamic_cast(i)){ -// std::cout << "call??" 
<< std::endl; -// } - // visit functions - for(ir::function *fn: src.get_function_list()) - forward_declare(fn); - for(ir::function *fn: src.get_function_list()) - visit_function(fn); -} - -void generator::add_extern_lib(const std::string &lib_name, - const std::string &lib_path) { - if (extern_lib_map_.count(lib_name) == 0) { - extern_lib_map_[lib_name] = create_extern_lib(lib_name, lib_path); - } else if (extern_lib_map_.at(lib_name)->path() != lib_path) { - throw std::runtime_error("A library has multiple paths (1) " + lib_path + - " (2) " + extern_lib_map_.at(lib_name)->path()); - } -} - -} // namespace codegen -} // namespace triton diff --git a/lib/codegen/target.cc b/lib/codegen/target.cc deleted file mode 100644 index 82ebbe64986a..000000000000 --- a/lib/codegen/target.cc +++ /dev/null @@ -1,173 +0,0 @@ -#include "triton/codegen/target.h" -#include "llvm/IR/IRBuilder.h" -#include "llvm/IR/Function.h" -#include "llvm/IR/Intrinsics.h" -#include "llvm/IR/IntrinsicsNVPTX.h" -#include "llvm/IR/IntrinsicsAMDGPU.h" -#include "llvm/IR/Value.h" -#include "llvm/IR/IRBuilder.h" -#include - -using namespace llvm; - -namespace triton{ -namespace codegen{ - -// base - - -nvidia_cu_target* target::as_nvidia() { - return dynamic_cast(this); -} - -bool target::is_gpu() const { - return is_gpu_; -} - -// AMD -void amd_cl_target::set_kernel(IRBuilder<>& builder, LLVMContext &ctx, Module *module, Function* fn) { - fn->setCallingConv(CallingConv::AMDGPU_KERNEL); -} - -Instruction* amd_cl_target::add_barrier(Module *module, IRBuilder<>& builder) { - Function *barrier = Intrinsic::getDeclaration(module, Intrinsic::amdgcn_s_barrier); - return builder.CreateIntrinsic(Intrinsic::amdgcn_s_barrier, {}, {}); -} - -Value* amd_cl_target::get_global_offset(Module *module, IRBuilder<>& builder, unsigned stride, unsigned ax) { - Value* group_id = get_block_id(module, builder, ax); - Value* result = builder.CreateMul(builder.getInt32(stride), group_id); - return result; -} - -Instruction* amd_cl_target::add_memfence(Module *module, IRBuilder<>& builder) { - throw std::runtime_error("not implemented"); -} - - -Value* amd_cl_target::get_block_id(Module *module, IRBuilder<>& builder, unsigned ax) { - static std::array ids = { - Intrinsic::amdgcn_workgroup_id_x, - Intrinsic::amdgcn_workgroup_id_y, - Intrinsic::amdgcn_workgroup_id_z - }; - Value* group_id = builder.CreateIntrinsic(ids[ax], {}, {}); - return group_id; -} - -Value* amd_cl_target::get_num_blocks(Module *module, IRBuilder<>& builder, unsigned ax) { - throw std::runtime_error("not implemented on AMD"); -} - -Value* amd_cl_target::get_local_id(Module *module, IRBuilder<>& builder, unsigned ax) { - static std::array ids = { - Intrinsic::amdgcn_workitem_id_x, - Intrinsic::amdgcn_workitem_id_y, - Intrinsic::amdgcn_workitem_id_z - }; - Function *get_local_id = Intrinsic::getDeclaration(module, ids[ax]); - return builder.CreateCall(get_local_id, {}); -} - -// NVIDIA - -void nvidia_cu_target::set_kernel(IRBuilder<>& builder, LLVMContext &ctx, Module *module, Function* fn){ - // set metadata - Metadata *md_args[] = { - ValueAsMetadata::get(fn), - MDString::get(ctx, "kernel"), - ValueAsMetadata::get(builder.getInt32(1)) - }; - module->getOrInsertNamedMetadata("nvvm.annotations")->addOperand(MDNode::get(ctx, md_args)); -} - -Instruction* nvidia_cu_target::add_barrier(Module *module, IRBuilder<>& builder) { - Function *barrier = Intrinsic::getDeclaration(module, Intrinsic::nvvm_barrier0); - return builder.CreateCall(barrier, {}); -} - -Instruction* 
nvidia_cu_target::add_memfence(Module *module, IRBuilder<>& builder) { - Function *barrier = Intrinsic::getDeclaration(module, Intrinsic::nvvm_membar_gl); - return builder.CreateCall(barrier, {}); -} - - -Value* nvidia_cu_target::get_global_offset(Module *module, IRBuilder<>& builder, unsigned stride, unsigned ax) { - Value* group_id = get_block_id(module, builder, ax); - Value* result = builder.CreateMul(builder.getInt32(stride), group_id); - return result; -} - -Value* nvidia_cu_target::get_block_id(Module *module, IRBuilder<>& builder, unsigned ax) { - static std::array cta_ids = { - Intrinsic::nvvm_read_ptx_sreg_ctaid_x, - Intrinsic::nvvm_read_ptx_sreg_ctaid_y, - Intrinsic::nvvm_read_ptx_sreg_ctaid_z - }; - Value* cta_id = builder.CreateIntrinsic(cta_ids[ax], {}, {}); - return cta_id; -} - -Value* nvidia_cu_target::get_local_id(Module *module, IRBuilder<>& builder, unsigned ax) { - static std::array ids = { - Intrinsic::nvvm_read_ptx_sreg_tid_x, - Intrinsic::nvvm_read_ptx_sreg_tid_y, - Intrinsic::nvvm_read_ptx_sreg_tid_z - }; - Function *get_local_id = Intrinsic::getDeclaration(module, ids[ax]); - return builder.CreateCall(get_local_id, {}); -} - -Value* nvidia_cu_target::get_num_blocks(Module *module, IRBuilder<>& builder, unsigned ax) { - static std::array ids = { - Intrinsic::nvvm_read_ptx_sreg_nctaid_x, - Intrinsic::nvvm_read_ptx_sreg_nctaid_y, - Intrinsic::nvvm_read_ptx_sreg_nctaid_z - }; - return builder.CreateIntrinsic(ids[ax], {}, {}); -} - -// CPU - -void cpu_target::set_kernel(IRBuilder<>& builder, LLVMContext &ctx, Module *module, Function* fn) { - // normal cpu functions can be kernels -} - -Instruction* cpu_target::add_barrier(Module *module, IRBuilder<>& builder) { - // no barrier on CPU - return (Instruction*)builder.CreateAdd(builder.getInt32(0), builder.getInt32(0)); -} - -Instruction* cpu_target::add_memfence(Module *module, IRBuilder<>& builder) { - // no barrier on CPU - return (Instruction*)builder.CreateAdd(builder.getInt32(0), builder.getInt32(0)); -} - - -Value* cpu_target::get_block_id(Module *module, llvm::IRBuilder<> &builder, unsigned ax) { - const Function *fn = builder.GetInsertBlock()->getParent(); - size_t num_params = fn->getFunctionType()->getNumParams(); - static std::array ids = { - fn->arg_begin() + num_params - 3, - fn->arg_begin() + num_params - 2, - fn->arg_begin() + num_params - 1 - }; - return (Argument*)ids[ax]; -} - -Value* cpu_target::get_num_blocks(Module *module, IRBuilder<>& builder, unsigned ax) { - throw std::runtime_error("not implemented"); -} - - -Value* cpu_target::get_global_offset(Module *module, IRBuilder<>& builder, unsigned stride, unsigned ax) { - Value* result = builder.CreateMul(builder.getInt32(stride), get_block_id(module, builder, ax)); - return result; -} - -Value* cpu_target::get_local_id(Module *module, IRBuilder<>& builder, unsigned ax) { - return builder.getInt32(0); -} - -} -} diff --git a/lib/codegen/transform/coalesce.cc b/lib/codegen/transform/coalesce.cc deleted file mode 100644 index 862ad1efe325..000000000000 --- a/lib/codegen/transform/coalesce.cc +++ /dev/null @@ -1,121 +0,0 @@ -#include -#include -#include "triton/ir/utils.h" -#include "triton/ir/instructions.h" -#include "triton/ir/function.h" -#include "triton/ir/module.h" -#include "triton/codegen/transform/coalesce.h" -#include "triton/codegen/analysis/align.h" -#include "triton/codegen/analysis/layout.h" - -namespace triton { -namespace codegen{ -namespace transform{ - -coalesce::coalesce(analysis::align* align, analysis::layouts *layouts, bool 
has_sm80) - : align_(align), layout_(layouts), has_sm80_(has_sm80) { } - -void coalesce::run(ir::module &mod) { - std::set invalidated; - ir::builder& builder = mod.get_builder(); - // add layout conversion instructions - for(ir::function *fn: mod.get_function_list()) - for(ir::basic_block *block: fn->blocks()) - for(ir::instruction* i: block->get_inst_list()){ - // coalesce before store - if(dynamic_cast(i) || dynamic_cast(i)) - if(ir::value* op = i->get_operand(1)) - if(op->get_type()->is_block_ty()) - if(op->get_type()->get_tile_ranks1() == 2) - if(invalidated.find(layout_->get(op)) == invalidated.end()) - if(layout_->get(op)->to_mma()) - if(dynamic_cast(i)->get_eviction_policy()==ir::io_inst::NORMAL){ - ir::instruction* new_op = ir::cvt_layout_inst::create(op); - builder.set_insert_point(i); - builder.insert(new_op); - i->replace_uses_of_with(op, new_op); - } - // coalesce before copy_to_shared - // only necessary for sm < 80 as Ampere+ can handle reduction - // on MMA layout - if(!has_sm80_) - if(dynamic_cast(i) || dynamic_cast(i)) - if(ir::value* op = i->get_operand(0)) - if(op->get_type()->is_block_ty()) - if(op->get_type()->get_tile_ranks1() == 2) - if(invalidated.find(layout_->get(op)) == invalidated.end()) - if(layout_->get(op)->to_mma()){ - ir::instruction* new_op = ir::cvt_layout_inst::create(op); - builder.set_insert_point(i); - builder.insert(new_op); - op->replace_all_uses_with(new_op); - new_op->replace_uses_of_with(new_op, op); - invalidated.insert(layout_->get(op)); - } - // uncoalesce after load - if(auto x = dynamic_cast(i)) - if(x->get_type()->is_block_ty()) - if(x->get_type()->get_tile_ranks1()==2) - if(layout_->get(x)->to_mma()) - if(!has_sm80_ || dynamic_cast(i)->get_eviction_policy()==ir::io_inst::NORMAL){ - builder.set_insert_point_after(x); - ir::instruction* new_x = ir::cvt_layout_inst::create(x); - builder.insert(new_x); - x->replace_all_uses_with(new_x); - new_x->replace_uses_of_with(new_x, x); - } - } - for(ir::function *fn: mod.get_function_list()) - for(ir::basic_block *block: fn->blocks()) - for(ir::instruction* i: block->get_inst_list()){ - // re-arrange scanline to promote memory coalescing - if(auto x = dynamic_cast(i)){ - ir::value* ptr = x->get_pointer_operand(); - ir::value* val = x->get_value_operand(); - auto out_contig = align_->contiguous(ptr); - auto val_inst = dynamic_cast(val); - if(!val_inst) - continue; - if(dynamic_cast(val)) - continue; - if(!val->get_type()->is_block_ty() || val->get_type()->get_tile_ranks1()==1) - continue; - std::vector in_contig; - std::vector queue = {val_inst}; - std::set seen; - std::vector ios; - while(!queue.empty()){ - ir::instruction* curr = queue.back(); - seen.insert(curr); - queue.pop_back(); - if(auto dot_inst = dynamic_cast(curr)) - break; - if(auto io_inst = dynamic_cast(curr)){ - in_contig = align_->contiguous(io_inst->get_pointer_operand()); - break; - } - for(ir::value* op: curr->ops()){ - auto inst_op = dynamic_cast(op); - if(!inst_op || seen.find(inst_op) != seen.end()) - continue; - if(!op->get_type()->is_block_ty() || - !val->get_type()->is_block_ty()) - continue; - if(op->get_type()->get_tile_num_elements() == - val->get_type()->get_tile_num_elements()) - queue.push_back(inst_op); - } - } - if(in_contig.size() <= 1 || out_contig==in_contig) - continue; - builder.set_insert_point_after(val_inst); - auto new_val = builder.insert(ir::cvt_layout_inst::create(val_inst)); - x->replace_uses_of_with(val_inst, new_val); - } - } -} - - -} -} -} diff --git a/lib/codegen/transform/cts.cc 
b/lib/codegen/transform/cts.cc deleted file mode 100644 index 4606b0f574c9..000000000000 --- a/lib/codegen/transform/cts.cc +++ /dev/null @@ -1,118 +0,0 @@ -#include "triton/codegen/analysis/layout.h" -#include "triton/codegen/transform/cts.h" -#include "triton/ir/module.h" -#include "triton/ir/function.h" -#include "triton/ir/basic_block.h" -#include "triton/ir/instructions.h" -#include "triton/ir/utils.h" -#include - -namespace triton { -namespace codegen{ -namespace transform{ - - -bool cts::is_shmem_op(ir::instruction* i, int op) { - if(i->get_id() == ir::INST_DOT) - return op == 0 || op == 1; - if(i->get_id() == ir::INST_COPY_FROM_SHARED) - return op==0; - if(i->get_id() == ir::INST_TRANS) - return op==0; - return false; -} - -bool cts::is_shmem_res(ir::value* v){ - ir::instruction* i = dynamic_cast(v); - if(!i) - return false; - if(i->get_id() == ir::INST_TRANS) - return true; - if(i->get_id() == ir::INST_COPY_TO_SHARED) - return true; - if(i->get_id() == ir::INST_MASKED_LOAD_ASYNC) - return true; - return false; -} - - -// run pass on module -void cts::add_copy(ir::instruction *parent, ir::value *x, ir::builder &builder, bool to_shared, std::map& copies) { - auto *i = dynamic_cast(x); - // not an instruction - if(!i) { - builder.set_insert_point(parent); - ir::value *copy; - if(to_shared) - copy = builder.create_copy_to_shared(x); - else - copy = builder.create_copy_from_shared(x); - parent->replace_uses_of_with(x, copy); - return; - } - // phi node - if(auto* phi = dynamic_cast(x)) { - for(unsigned i = 0; i < phi->get_num_incoming(); ++i) - add_copy(phi, phi->get_incoming_value(i), builder, to_shared, copies); - return; - } - // already in shared memory - if(to_shared && is_shmem_res(i)) - return; - // copy - builder.set_insert_point_after(i); - ir::value *copy; - if(to_shared){ - copy = builder.create_copy_to_shared(x); - } - else - copy = builder.create_copy_from_shared(x); - copies.insert({x, copy}); - parent->replace_uses_of_with(x, copies.at(x)); -} - -void cts::run(ir::module &mod) { - // Precompute where copies should be added - std::set shmem_ops; - std::set shmem_res; - ir::for_each_instruction(mod, [&](ir::instruction* i) { - if(i->get_id() == ir::INST_DOT){ - ir::dot_inst* dot = dynamic_cast(i); - ir::value* lhs = i->get_operand(0); - ir::type* ty = lhs->get_type()->get_scalar_ty(); - analysis::mma_layout* mma_lhs = layouts_->get(lhs)->to_mma(); - // TODO: V100 - bool is_lhs_shmem = !(mma_lhs && has_sm80_ && ty->get_primitive_size_in_bits() == 16 && !dot->is_trans_a()); - if(is_lhs_shmem) - shmem_ops.insert(lhs); - shmem_ops.insert(i->get_operand(1)); - } - if(i->get_id() == ir::INST_COPY_FROM_SHARED) - shmem_ops.insert(i->get_operand(0)); - if(i->get_id() == ir::INST_TRANS) - shmem_ops.insert(i->get_operand(0)); - if(i->get_id() == ir::INST_TRANS || - i->get_id() == ir::INST_COPY_TO_SHARED || - i->get_id() == ir::INST_MASKED_LOAD_ASYNC) - shmem_res.insert(i); - }); - - // Add shared copies - std::map copies; - ir::builder &builder = mod.get_builder(); - ir::for_each_instruction(mod, [&](ir::instruction* i) { - size_t num_op = i->get_num_operands(); - for(size_t k = 0; k < num_op; k++){ - ir::value* op = i->get_operand(k); - // copy to shared operands - bool is_shmem_op = shmem_ops.find(op) != shmem_ops.end(); - if(is_shmem_op) - add_copy(i, op, builder, true, copies); - } - }); -} - - -} -} -} \ No newline at end of file diff --git a/lib/codegen/transform/dce.cc b/lib/codegen/transform/dce.cc deleted file mode 100644 index 7416ff6e8908..000000000000 --- 
a/lib/codegen/transform/dce.cc +++ /dev/null @@ -1,79 +0,0 @@ -#include "triton/codegen/transform/dce.h" -#include "triton/ir/function.h" -#include "triton/ir/basic_block.h" -#include "triton/ir/module.h" -#include "triton/ir/utils.h" -#include - -namespace triton { -namespace codegen{ -namespace transform{ - - -void dce::run(ir::module &mod) { - std::list work_list; - std::set marked; - - // initialize work-list - for(ir::function *fn: mod.get_function_list()){ - std::vector rpo = ir::cfg::reverse_post_order(fn); - // iterate through blocks - for(ir::basic_block *block: rpo) - for(ir::instruction *i: block->get_inst_list()){ - switch(i->get_id()){ - case ir::INST_RETURN: - case ir::INST_UNCOND_BRANCH: - case ir::INST_COND_BRANCH: - case ir::INST_UNMASKED_STORE: - case ir::INST_MASKED_STORE: - case ir::INST_ATOMIC_CAS: - case ir::INST_ATOMIC_RMW: - case ir::INST_ATOMIC_EXCH: - case ir::INST_CALL: - case ir::INST_LAUNCH: - case ir::INST_BARRIER: { - work_list.push_back(i); - marked.insert(i); - break; - } - default: - break; - } - } - } - - // mark -- ignore branches - while(!work_list.empty()){ - ir::instruction* current = work_list.back(); - work_list.pop_back(); - // mark instruction operands - for(ir::value* op: current->ops()) { - if(auto *i = dynamic_cast(op)){ - if(marked.insert(i).second) - work_list.push_back(i); - } - } - // TODO: mark last intstruction of current's reverse-dominance frontier - } - - // sweep -- delete non-branch unmarked instructions - std::vector to_delete; - for(ir::function *fn: mod.get_function_list()){ - std::vector rpo = ir::cfg::reverse_post_order(fn); - // iterate through blocks - for(ir::basic_block *block: rpo) - for(ir::instruction *i: block->get_inst_list()){ - if(marked.find(i) == marked.end()) - to_delete.push_back(i); - } - } - - - // delete - for(ir::instruction* i: to_delete) - i->erase_from_parent(); -} - -} -} -} diff --git a/lib/codegen/transform/disassociate.cc b/lib/codegen/transform/disassociate.cc deleted file mode 100644 index 2709125f8501..000000000000 --- a/lib/codegen/transform/disassociate.cc +++ /dev/null @@ -1,62 +0,0 @@ -#include "triton/codegen/transform/disassociate.h" -#include "triton/ir/utils.h" -#include "triton/ir/instructions.h" -#include "triton/ir/builder.h" -#include "triton/ir/module.h" -#include - -namespace triton { -namespace codegen{ -namespace transform{ - -ir::instruction* rematerialize(ir::builder& bld, ir::instruction *root, - std::set& seen) { - if (dynamic_cast(root)) - return root; - if(!seen.insert(root).second) - return root; - if(!root->get_type()->is_block_ty()) - return root; - - bld.set_insert_point(root); - ir::instruction *new_root = bld.insert(root->clone()); - for(ir::value *op: root->ops()){ - ir::instruction *i = dynamic_cast(op); - if(!i || i->get_id() == ir::INST_REDUCE) - continue; - ir::instruction* new_op = rematerialize(bld, i, seen); - new_root->replace_uses_of_with(op, new_op); - } - return new_root; -} - -void disassociate::run(ir::module &mod) { - ir::builder &bld = mod.get_builder(); - -// ir::for_each_instruction(mod, [&](ir::instruction *i){ -// bld.set_insert_point(i); -// for(ir::value* op: i->ops()){ -// auto reshape = dynamic_cast(op); -// if(!reshape) -// continue; -// ir::instruction* new_op = bld.insert(reshape->clone()); -// i->replace_uses_of_with(op, new_op); -// } -// }); - - - ir::for_each_instruction(mod, [&](ir::instruction *i){ - if(dynamic_cast(i) || dynamic_cast(i)){ - std::set seen; - ir::instruction* new_i = rematerialize(bld, i, seen); - 
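// A minimal sketch of the mark-and-sweep scheme used by the DCE pass above,
// applied to a toy instruction list: seed the worklist with side-effecting
// "roots", transitively mark everything they use, and delete the rest.
// The Inst struct and helper are hypothetical stand-ins for the real IR.
#include <cstdio>
#include <set>
#include <vector>

struct Inst {
  int id;
  bool has_side_effects;          // store / branch / return / atomic ...
  std::vector<Inst *> operands;   // values this instruction uses
};

static void dce(std::vector<Inst *> &insts) {
  std::vector<Inst *> worklist;
  std::set<Inst *> marked;
  for (Inst *i : insts)
    if (i->has_side_effects) { worklist.push_back(i); marked.insert(i); }
  while (!worklist.empty()) {
    Inst *cur = worklist.back();
    worklist.pop_back();
    for (Inst *op : cur->operands)
      if (marked.insert(op).second)   // newly marked -> follow its operands too
        worklist.push_back(op);
  }
  std::vector<Inst *> kept;
  for (Inst *i : insts) {
    if (marked.count(i)) kept.push_back(i);
    else std::printf("deleting %%%d\n", i->id);
  }
  insts = kept;
}

int main() {
  Inst a{0, false, {}}, b{1, false, {&a}}, dead{2, false, {&a}}, st{3, true, {&b}};
  std::vector<Inst *> insts{&a, &b, &dead, &st};
  dce(insts);                         // only %2 is unreachable from a root
  return 0;
}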
i->replace_all_uses_with(new_i); - } - }); - - -} - - -} -} -} diff --git a/lib/codegen/transform/inline.cc b/lib/codegen/transform/inline.cc deleted file mode 100644 index cee932c3ae6b..000000000000 --- a/lib/codegen/transform/inline.cc +++ /dev/null @@ -1,147 +0,0 @@ -#include -#include "triton/codegen/transform/inline.h" -#include "triton/ir/module.h" -#include "triton/ir/function.h" -#include "triton/ir/utils.h" - -namespace triton{ -namespace codegen{ -namespace transform{ - - -bool fncmp::operator()(ir::function* x, ir::function* y) const { - auto fn_list = x->get_parent()->get_function_list(); - return std::find(fn_list.begin(), fn_list.end(), x) < std::find(fn_list.begin(), fn_list.end(), y); -}; - -void inliner::do_inline(ir::function* fn, ir::call_inst* callsite, ir::builder& builder, - std::list& callsites){ - ir::basic_block* parent_block = callsite->get_parent(); - ir::function* parent_fn = parent_block->get_parent(); - // the parent block is split into block A and block B: - // - block A (`new_blocks[0]`) is the entry block of the inlined function - // - block B (`exit`) resumes execution of the parent function - ir::basic_block* entry = parent_block->split_before(callsite, fn->get_name()); - ir::basic_block* exit = entry->get_successors()[0]; - std::vector new_blocks = {entry}; - for(size_t i = 1; i < fn->blocks().size(); i++){ - ir::basic_block* block = fn->blocks()[i]; - ir::context& ctx = block->get_context(); - const std::string& name = block->get_parent()->get_name() + "_" + block->get_name(); - new_blocks.push_back(ir::basic_block::create(ctx, name, parent_fn)); - } - // a phi node holds the return values of the inlined function - if(exit->get_inst_list().empty()) - builder.set_insert_point(exit); - else - builder.set_insert_point(exit->get_first_non_phi()); - ir::phi_node* exit_val = builder.create_phi(fn->get_fn_type()->get_return_ty(), 0); - callsite->replace_all_uses_with(exit_val); - callsite->erase_from_parent(); - // get arguments `fn` is called with - std::vector tgt_args(callsite->op_begin(), callsite->op_end()); - std::vector src_args(fn->args().begin(), fn->args().end()); - // Actually generate the instructions: - // - Remove the branch created by basic_block::split_before - // - Clone all instructions - // - Replace `ret` with incoming nodes to `exit_val` and branches to `exit` - ir::instruction* terminator = new_blocks[0]->get_inst_list().back(); -// new_blocks[0]->get_inst_list().back()->erase_from_parent(); - terminator->erase_from_parent(); - std::map inst_map; - std::map arg_map; - for(size_t k = 0; k < fn->args().size(); k++) - arg_map[fn->args()[k]] = callsite->ops()[k]; - std::vector rpo = ir::cfg::reverse_post_order(fn); - // clone instructions - for(size_t i = 0; i < new_blocks.size(); i++){ - ir::basic_block* old_block = fn->blocks()[i]; - ir::basic_block* new_block = new_blocks[i]; - builder.set_insert_point(new_block); - for(ir::instruction* old_inst: old_block->get_inst_list()){ - ir::instruction* new_inst = old_inst->clone(); - inst_map[old_inst] = new_inst; - builder.insert(new_inst); - } - } - // update basic blocks - for(size_t i = 0; i < new_blocks.size(); i++) { - for (ir::instruction* new_inst: new_blocks[i]->get_inst_list()) { - // replace basic use cases - for(size_t k = 0; k < new_blocks.size(); k++) - new_inst->replace_uses_of_with(fn->blocks()[k], new_blocks[k]); - if(ir::phi_node* phi = dynamic_cast(new_inst)) { - // additionally replace basic blocks of phi-nodes since - // replace_uses_of_with() does not replace them. 
- for(unsigned in = 0; in < phi->get_num_incoming(); in++) - for(size_t k = 0; k < new_blocks.size(); k++) - if (phi->get_incoming_block(in) == fn->blocks()[k]) - phi->set_incoming_block(in, new_blocks[k]); - } - } - } - // replace operands of instructions after constructing inst_map - for (auto& it: inst_map) { - ir::instruction* new_inst = it.second; - for(size_t k = 0; k < new_inst->get_num_operands(); k++) { - ir::value* op = new_inst->get_operand(k); - if(auto arg_op = dynamic_cast(op)) - new_inst->set_operand(k, arg_map.at(arg_op)); - if(auto inst_op = dynamic_cast(op)) - if(inst_map.find(inst_op) != inst_map.end()) - new_inst->set_operand(k, inst_map.at(inst_op)); - } - // handles a ret instruction. - // instead of returning we need to branch to after the function call - if(ir::return_inst* ret = dynamic_cast(new_inst)) { - if(ir::value* ret_val = ret->get_return_value()) - exit_val->add_incoming(ret_val, new_inst->get_parent()); - // replace ret with branch - ir::instruction* new_br_inst = ir::branch_inst::create(exit); - builder.set_insert_point(new_inst->get_parent()); - builder.insert(new_br_inst); - new_inst->erase_from_parent(); - } - } - if(exit_val->get_num_incoming() == 1) - exit_val->replace_all_uses_with(exit_val->get_incoming_value(0)); - // done -- make sure insert point is properly set to exit block - builder.set_insert_point(exit); -} - -void inliner::run(ir::module &mod) { - - // gather all call sites - while(true){ - std::map counts; - for(ir::function* fn: mod.get_function_list()) - counts[fn] = 0; - - std::list callsites; - for(ir::function* fn: mod.get_function_list()){ - for(ir::basic_block* block: fn->blocks()) - for(ir::instruction* instr: block->get_inst_list()) - if(ir::call_inst* call = dynamic_cast(instr)){ - callsites.push_back(call); - counts[call->get_fn()] += 1; - } - } - - for(auto& count: counts){ - if(!count.first->get_is_kernel() && count.second == 0) - count.first->get_parent()->remove_function(count.first); - } - - if(callsites.empty()) - break; - - for(ir::call_inst* call: callsites) - do_inline(call->get_fn(), call, mod.get_builder(), callsites); - } - - -} - -} -} -} diff --git a/lib/codegen/transform/membar.cc b/lib/codegen/transform/membar.cc deleted file mode 100644 index 224f44e9b970..000000000000 --- a/lib/codegen/transform/membar.cc +++ /dev/null @@ -1,254 +0,0 @@ -#include -#include -#include -#include "triton/codegen/analysis/layout.h" -#include "triton/codegen/analysis/allocation.h" -#include "triton/codegen/transform/membar.h" -#include "triton/codegen/transform/prefetch.h" -#include "triton/ir/module.h" -#include "triton/ir/function.h" -#include "triton/ir/basic_block.h" -#include "triton/ir/instructions.h" -#include "triton/ir/utils.h" - -namespace triton { - -namespace codegen{ -namespace transform{ - - - -int membar::group_of(ir::value* v, std::vector &async_write) { - if(ir::phi_node* phi = dynamic_cast(v)){ - analysis::shared_layout* layout = layouts_->get(v)->to_shared(); - if (analysis::double_buffer_info_t* info = layout->get_double_buffer()) - return group_of(info->first, async_write); - else if (analysis::N_buffer_info_t* info = layout->get_N_buffer()) { - if (v == info->phi) - return group_of(info->firsts[0], async_write); - else // prefetched value - return group_of(info->firsts[1], async_write); - } - std::vector groups(phi->get_num_operands()); - std::transform(phi->op_begin(), phi->op_end(), groups.begin(), [&](ir::value* v){ return group_of(v, async_write);}); - return *std::max_element(groups.begin(), groups.end()); 
- } - else{ - if(layouts_->has_tmp(v)) - return async_write.size() - 1; - // // Ignore copy_to_shared. It won't modify async behavior. - // if(dynamic_cast(v)) - // return 0; - auto it = std::find(async_write.begin(), async_write.end(), v); - return std::distance(async_write.begin(), it); - } -} - -inline bool membar::intersect_with(analysis::shared_layout* a_layout, analysis::shared_layout* b_layout) { - if(!a_layout || !b_layout) - return false; - int a_start = alloc_->offset(a_layout); - int a_end = a_start + a_layout->get_size(); - int b_start = alloc_->offset(b_layout); - int b_end = b_start + b_layout->get_size(); - if(a_start < b_end || b_start < a_end) - return true; - return false; -} - -membar::val_set_t membar::intersect_with(const val_set_t& as, const val_set_t& bs) { - val_set_t ret; - for(ir::value* a: as){ - if(!a->get_type()->is_block_ty()) - continue; - analysis::shared_layout* a_layout = layouts_->get(a)->to_shared(); - analysis::shared_layout* a_tmp = layouts_->has_tmp(a) ? layouts_->get(layouts_->tmp(a))->to_shared() : nullptr; - analysis::shared_layout* a_tmp_index = layouts_->has_tmp_index(a) ? layouts_->get(layouts_->tmp_index(a))->to_shared() : nullptr; - for(ir::value* b: bs){ - if(!b->get_type()->is_block_ty()) - continue; - analysis::shared_layout* b_layout = layouts_->get(b)->to_shared(); - analysis::shared_layout* b_tmp = layouts_->has_tmp(b) ? layouts_->get(layouts_->tmp(b))->to_shared() : nullptr; - analysis::shared_layout* b_tmp_index = layouts_->has_tmp_index(b) ? layouts_->get(layouts_->tmp_index(b))->to_shared() : nullptr; - if(intersect_with(a_layout, b_layout) || - intersect_with(a_layout, b_tmp) || - intersect_with(a_layout, b_tmp_index) || - intersect_with(a_tmp, b_layout) || - intersect_with(a_tmp, b_tmp) || - intersect_with(a_tmp, b_tmp_index) || - intersect_with(a_tmp_index, b_layout) || - intersect_with(a_tmp_index, b_tmp) || - intersect_with(a_tmp_index, b_tmp_index)) - ret.insert(b); - } - } - return ret; -} - -bool membar::check_safe_war(ir::instruction* i) { - bool is_i_shared_block = i->get_type()->is_block_ty() && - layouts_->get(i)->to_shared(); - bool is_i_double_buffered = is_i_shared_block && - layouts_->get(i)->to_shared()->get_double_buffer(); - bool is_i_n_buffered = is_i_shared_block && - layouts_->get(i)->to_shared()->get_N_buffer(); - - if (is_i_double_buffered || is_i_n_buffered) { - // with async copy & prefetch_s disabled, WARs are not safe - if (dynamic_cast(i) && !prefetch_->is_prefetched(i)) - return false; - else - return true; - } - return false; -} - -void membar::transfer(ir::basic_block *block, - val_vec_t& async_write, - val_set_t& sync_write, - val_set_t& sync_read, - std::set& safe_war, - bool& inserted, ir::builder& builder) { - std::vector async_waits; - ir::basic_block::inst_list_t instructions = block->get_inst_list(); - for(ir::instruction *i: instructions){ - if(dynamic_cast(i)) - continue; - if(std::find(async_write.begin(), async_write.end(), i) == async_write.end() && - dynamic_cast(i)){ - async_write.push_back(i); - } - if(dynamic_cast(i)) - sync_write.insert(i); - ir::barrier_inst* barrier = dynamic_cast(i); - ir::async_wait_inst* async_wait = dynamic_cast(i); - // Get shared memory reads - std::set read; - std::copy_if(i->op_begin(), i->op_end(), std::inserter(read, read.begin()), - [&](ir::value* i){ return i->get_type()->is_block_ty() && layouts_->get(i)->to_shared();}); - if(layouts_->has_tmp(i)) - read.insert(i); - // RAW (async) - val_set_t tmp; - std::copy(async_write.begin(), async_write.end(), 
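// A minimal sketch of the buffer-overlap question the membar pass is answering
// above: each shared-memory allocation occupies a byte range [offset, offset + size),
// and two half-open ranges overlap exactly when each one starts before the other
// ends. The Range type and helper are illustrative, not part of this codebase.
#include <cassert>

struct Range { int start; int end; };   // half-open [start, end)

static bool overlaps(Range a, Range b) {
  return a.start < b.end && b.start < a.end;
}

int main() {
  assert( overlaps({0, 128}, {64, 256}));   // share bytes 64..127
  assert(!overlaps({0, 128}, {128, 256}));  // adjacent, disjoint
  return 0;
}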
std::inserter(tmp, tmp.begin())); - if(intersect_with(read, tmp).size()){ - std::vector groups(read.size()); - std::transform(read.begin(), read.end(), groups.begin(), [&](ir::value* v){ return group_of(v, async_write);}); - int N = *std::max_element(groups.begin(), groups.end()); - if(N < async_write.size()){ - builder.set_insert_point(i); - async_wait = (ir::async_wait_inst*)builder.create_async_wait(async_write.size() - 1 - N); - barrier = (ir::barrier_inst*)builder.create_barrier(); - inserted = true; - async_waits.push_back(async_wait); - } - } - // RAW, WAR - bool is_safe_war = check_safe_war(i); - // WAR barrier is not required when data is double-buffered - if(!intersect_with(read, sync_write).empty() || - (!intersect_with({i}, sync_read).empty() && !is_safe_war)) { - builder.set_insert_point(i); - barrier = (ir::barrier_inst*)builder.create_barrier(); - inserted = true; - } - // update state of asynchronous copies - if(async_wait){ - int N = async_write.size() - async_wait->get_N(); - async_write.erase(async_write.begin(), async_write.begin() + N); - } - // all the copy_to_shared and read from shared are synchronized after barrier - if(barrier){ - sync_write.clear(); - sync_read.clear(); - } - sync_read.insert(read.begin(), read.end()); - } - - // coalesce barriers - // fixme: to support more general cases - if (async_waits.size() == 2) { - // (aw N; bar; prefetch; aw N-1; bar; prefetch; => aw N-1; bar; 2*prefetch;) - for (int idx=0; idx to_erase; - ir::basic_block::inst_list_t instructions = block->get_inst_list(); - for(auto iter = instructions.begin(); iter != instructions.end(); ++iter){ - ir::instruction *i = *iter; - if (static_cast(first_async_wait) == i) { - // peak next 5 instructions - auto peak_iter = std::next(iter); - if (std::distance(peak_iter, instructions.end()) >= 5) { - auto first_bar = dynamic_cast(*peak_iter++); - auto first_pf = dynamic_cast(*peak_iter++); - auto second_async_wait = dynamic_cast(*peak_iter++); - auto second_bar = dynamic_cast(*peak_iter++); - auto second_pf = dynamic_cast(*peak_iter); - if (first_bar && first_pf && second_async_wait && second_bar && second_pf) { - int first_n = first_async_wait->get_N(); - int second_n = second_async_wait->get_N(); - to_erase.push_back(second_async_wait); - to_erase.push_back(second_bar); - first_async_wait->set_N(second_n); - } - } else - break; - for (ir::instruction *i : to_erase) - block->erase(i); - } - } - } - } -} - -void membar::run(ir::module &mod) { - ir::builder &builder = mod.get_builder(); - // extract phi-node associates with double-buffered - // shared-memory copies. 
These can be read from and written to - // without needing synchronization - std::set safe_war; - for(const auto& x: layouts_->get_all()){ - analysis::shared_layout* layout = x.second->to_shared(); - if(!layout || !layout->get_double_buffer() || !layout->get_N_buffer()) - continue; - for(ir::value *v: layout->get_values()) - if(v != layout->get_double_buffer()->phi){ - safe_war.insert(v); - } - } - - for(ir::function *fn: mod.get_function_list()){ - std::vector rpo = ir::cfg::reverse_post_order(fn); - std::map async_writes; - std::map sync_writes; - std::map sync_reads; - std::list pipelined; - bool inserted; - do{ - inserted = false; - // find barrier location - for(ir::basic_block *block: rpo){ - // join inputs - val_vec_t async_write; - val_set_t sync_write; - val_set_t sync_read; - val_set_t tmp; - for(ir::basic_block* pred: block->get_predecessors()){ - for(ir::value* v: async_writes[pred]) - if(tmp.insert(v).second) - async_write.push_back(v); - sync_write.insert(sync_writes[pred].begin(), sync_writes[pred].end()); - sync_read.insert(sync_reads[pred].begin(), sync_reads[pred].end()); - } - transfer(block, async_write, sync_write, sync_read, safe_war, inserted, builder); - async_writes[block] = async_write; - sync_writes[block] = sync_write; - sync_reads[block] = sync_read; - } - }while(inserted); - } -} - -} -} -} diff --git a/lib/codegen/transform/peephole.cc b/lib/codegen/transform/peephole.cc deleted file mode 100644 index a7d3f8240443..000000000000 --- a/lib/codegen/transform/peephole.cc +++ /dev/null @@ -1,331 +0,0 @@ -#include -#include -#include "triton/ir/module.h" -#include "triton/ir/function.h" -#include "triton/codegen/transform/peephole.h" -#include "triton/codegen/analysis/layout.h" - -namespace triton { -namespace codegen{ -namespace transform{ - - -ir::value* rewrite_trans_phi_impl(ir::value *value, ir::builder &builder, - const std::vector& perm) { - if(auto phi = dynamic_cast(value)) { - // transpose operands - std::vector incs; - for(unsigned n = 0; n < phi->get_num_incoming(); n++) - incs.push_back(rewrite_trans_phi_impl(phi->get_incoming_value(n), builder, perm)); - // create phi for transposed values - builder.set_insert_point(phi); - ir::phi_node* result = builder.create_phi(incs[0]->get_type(), incs.size()); - for(unsigned n = 0; n < phi->get_num_incoming(); n++) - result->add_incoming(incs[n], phi->get_incoming_block(n)); - return result; - } - else if(auto i = dynamic_cast(value)){ - ir::basic_block* block = i->get_parent(); - auto it = std::find(block->begin(), block->end(), i); - it++; - builder.set_insert_point(it); - ir::instruction *trans = (ir::instruction*)builder.create_trans(i, perm); - trans->set_operand(0, i); - return trans; - } - return nullptr; -} - -bool peephole::rewrite_trans_phi(ir::instruction* value, ir::builder& builder) { - auto trans = dynamic_cast(value); - if(!trans) - return false; - auto users = trans->get_users(); - auto ops = trans->ops(); - if(users.size() > 1 || ops.size() > 1) - return false; - ir::value* op = *ops.begin(); - // trans(phi) -> phi(trans(), trans()...) 
- auto* phi = dynamic_cast(op); - if(!phi) - return false; - ir::value* new_phi = rewrite_trans_phi_impl(phi, builder, trans->get_perm()); - if(!new_phi) - return false; - trans->replace_all_uses_with(new_phi); - - return true; -} - -bool peephole::rewrite_dot(ir::instruction *value, ir::builder& builder){ - // dot(a, b, c) + d -> dot(a, b, c + d) - // d + dot(a, b, c) -> dot(a, b, c + d) - auto add = dynamic_cast(value); - if(add && (add->get_op() == ir::binary_op_t::FAdd || add->get_op() == ir::binary_op_t::Add)) { - bool is_int_dot = add->get_op() == ir::binary_op_t::Add; - ir::value *lhs = add->get_operand(0); - ir::value *rhs = add->get_operand(1); - ir::dot_inst *lhs_dot = dynamic_cast(lhs); - ir::dot_inst *rhs_dot = dynamic_cast(rhs); - if(!lhs_dot && !rhs_dot) - return false; - ir::dot_inst *dot = lhs_dot ? lhs_dot : rhs_dot; - ir::value *other = (dot == lhs) ? rhs : lhs; - ir::value *acc = dot->get_operand(2); - ir::splat_inst *splat = dynamic_cast(acc); - ir::constant *_0 = nullptr; - if(splat) - _0 = dynamic_cast(splat->get_operand(0)); - if(!_0) - return false; - if (auto *fp_0 = dynamic_cast(_0)) - if (fp_0->get_value() != 0.0) - return false; - if (auto *int_0 = dynamic_cast(_0)) - if (int_0->get_value() != 0) - return false; - ir::value *a = dot->get_operand(0); - ir::value *b = dot->get_operand(1); - builder.set_insert_point(add); - ir::value * new_dot = builder.insert(ir::dot_inst::create(a, b, other, dot->is_trans_a(), dot->is_trans_b(), dot->allow_tf32(), dot->get_name())); - add->replace_all_uses_with(new_dot); - return true; - } - return false; -} - -//bool peephole::rewrite_cts_cfs(ir::instruction *value, ir::builder &builder){ -// auto cfs = dynamic_cast(value); -// if(cfs) { -// ir::value *arg = cfs->get_operand(0); -// ir::copy_to_shared_inst* cts = dynamic_cast(arg); -// if(!cts) -// return false; -// cfs->replace_all_uses_with(cts->get_operand(0)); -// return true; -// } - -//} - -bool peephole::rewrite_load_to_shared(ir::instruction *value, ir::builder& builder){ - auto copy_to_shared = dynamic_cast(value); - if(!copy_to_shared) - return false; - ir::value *arg = copy_to_shared->get_operand(0); - ir::masked_load_inst* ld = dynamic_cast(arg); - if(!ld) - return false; - builder.set_insert_point(copy_to_shared); - ir::value *ptr = ld->get_pointer_operand(); - ir::value *msk = ld->get_mask_operand(); - ir::value *val = ld->get_false_value_operand(); - analysis::scanline_layout* layout = layouts_->get(ptr)->to_scanline(); - int nts = layout->nts(layout->get_order()[0]); - int dtsize = value->get_type()->get_scalar_ty()->get_primitive_size_in_bits() / 8; - if(nts*dtsize >= 4){ - ir::value* new_load = builder.create_masked_load_async(ptr, msk, val, ld->get_cache_modifier(), ld->get_eviction_policy()); - copy_to_shared->replace_all_uses_with(new_load); - return true; - } - return false; -// analysis::scanline_layout* layout = layouts_->get(ptr)->to_scanline(); -// std::cout << layout->nts(layout->get_order(0)) << std::endl; -// return true; - -} - -bool peephole::rewrite_unit_red(ir::instruction *value, ir::builder& builder){ - auto x = dynamic_cast(value); - if(!x) - return false; - ir::value *arg = x->get_operand(0); - auto shapes = arg->get_type()->get_block_shapes(); - if(shapes[x->get_axis()] == 1){ - builder.set_insert_point(x); - ir::value* new_red = builder.create_reshape(arg, x->get_type()->get_block_shapes()); - x->replace_all_uses_with(new_red); - return true; - } - return false; -} - -bool peephole::rewrite_mult(ir::instruction *value, ir::builder& builder) 
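// A minimal sketch of the algebraic identity behind rewrite_dot above: when the
// accumulator of a dot is a zero splat, dot(a, b, 0) + d can be folded into
// dot(a, b, d), removing one add. Scalars stand in for tiles here.
#include <cassert>

static int dot(int a, int b, int acc) { return a * b + acc; }  // toy multiply-accumulate

int main() {
  int a = 3, b = 4, d = 7;
  assert(dot(a, b, 0) + d == dot(a, b, d));  // the rewrite preserves the result
  return 0;
}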
{ - auto binop = dynamic_cast(value); - if(binop && binop->get_op() == ir::binary_op_t::Mul) { - ir::value *lhs = binop->get_operand(0); - ir::value *rhs = binop->get_operand(1); - ir::constant_int *_1_lhs = nullptr; - if(ir::splat_inst *splat = dynamic_cast(lhs)){ - auto *cst = dynamic_cast(splat->get_operand(0)); - if(cst && cst->get_value() == 1) - _1_lhs = cst; - } - ir::constant_int *_1_rhs = nullptr; - if(ir::splat_inst *splat = dynamic_cast(rhs)){ - auto *cst = dynamic_cast(splat->get_operand(0)); - if(cst && cst->get_value() == 1) - _1_rhs = cst; - } - if(_1_lhs){ - binop->replace_all_uses_with(rhs); - return true; - } - else if(_1_rhs){ - binop->replace_all_uses_with(lhs); - return true; - } - } - return false; -} - -bool peephole::rewrite_insert_extract(ir::instruction *value, ir::builder& builder){ - auto extracted = dynamic_cast(value); - if(!extracted) - return false; - size_t extract_idx = extracted->get_idx(); - ir::value* agg = extracted->get_operand(0); - auto insert = dynamic_cast(agg); - while(insert){ - agg = insert->get_operand(0); - ir::value* inserted = insert->get_operand(1); - size_t insert_idx = insert->get_idx(); - insert = dynamic_cast(agg); - if(extract_idx == insert_idx){ - extracted->replace_all_uses_with(inserted); - return true; - } - insert = dynamic_cast(agg); - } - return false; -} - - -bool peephole::rewrite_gep_ptr_min_off_plus_off(ir::instruction *value, ir::builder& builder) { - auto x = dynamic_cast(value); - if(!x) - return false; - auto y = dynamic_cast(x->get_pointer_operand()); - if(!y) - return false; - auto idx = *y->idx_begin(); - auto z = dynamic_cast(idx); - if(!z) - return false; - bool is_sub = z->get_op() == ir::binary_op_t::Sub; - auto *lhs = dynamic_cast(z->get_operand(0)); - bool is_lhs_0 = lhs && (lhs->get_value()==0); - bool is_rhs_eq_x_rhs = z->get_operand(1) == *x->idx_begin(); - if(is_sub && is_lhs_0 && is_rhs_eq_x_rhs){ - x->replace_all_uses_with(y->get_pointer_operand()); - return true; - } - return false; -} - -bool peephole::rewrite_select_masked_load(ir::instruction *value, ir::builder& builder){ - auto select = dynamic_cast(value); - if(!select) - return false; - auto if_value = dynamic_cast(select->get_if_value_op()); - if(!if_value) - return false; - if(select->get_pred_op() != if_value->get_mask_operand()) - return false; - builder.set_insert_point(select); - ir::value* new_load = builder.create_masked_load(if_value->get_pointer_operand(), - if_value->get_mask_operand(), - select->get_else_value_op(), - if_value->get_cache_modifier(), - if_value->get_eviction_policy(), - if_value->get_is_volatile()); - select->replace_all_uses_with(new_load); - return true; -} - -bool peephole::rewrite_cvt_layout(ir::instruction *value, ir::builder& builder){ - auto cvt = dynamic_cast(value); - if(!cvt) - return false; - ir::instruction* op = dynamic_cast(cvt->get_operand(0)); - if(!op) - return false; -// // convert(elementwise(x, y)) = elementwise(convert(x), convert(y)) -// if(op->get_id() == ir::INST_BINOP){ -// for(size_t i = 0; i < op->get_num_operands(); i++){ -// ir::value* arg_i = op->get_operand(i); -// builder.set_insert_point(op); -// // create new layout transform -// ir::instruction* new_arg_i = cvt->clone(); -// layouts_->copy(new_arg_i, op); -// builder.insert(new_arg_i); -// // set the right args -// new_arg_i->replace_uses_of_with(new_arg_i->get_operand(0), arg_i); -// op->replace_uses_of_with(arg_i, new_arg_i); -// } -// cvt->replace_all_uses_with(op); -// return true; -// } - auto cvt_op = dynamic_cast(op); - 
if(!cvt_op) - return false; - // convert1(convert2(x)) if convert1 is the inverse of convert2 - ir::value* op_op = cvt_op->get_operand(0); - if(layouts_->has(cvt) && layouts_->has(op_op) && - layouts_->get(cvt) && layouts_->get(op_op)){ - cvt->replace_all_uses_with(op_op); - return true; - } - return false; -} - -void peephole::run(ir::module &mod) { - ir::builder &builder = mod.get_builder(); - // keep track of whether any modification was made - std::set seen; - size_t n_seen; - - // rewrite dots first - do{ - n_seen = seen.size(); - for(ir::function *fn: mod.get_function_list()) - for(ir::basic_block *block: fn->blocks()) - for(ir::instruction* i: block->get_inst_list()){ - if(seen.find(i) != seen.end()) - continue; - bool was_modified = rewrite_dot(i, builder); - if(was_modified){ - seen.insert(i); - } - } - }while(seen.size() != n_seen); - - // rewrite other ops - seen.clear(); - do{ - n_seen = seen.size(); - for(ir::function *fn: mod.get_function_list()) - for(ir::basic_block *block: fn->blocks()) - for(ir::instruction* i: block->get_inst_list()){ - if(seen.find(i) != seen.end()) - continue; - bool was_modified = false; - was_modified = was_modified || rewrite_mult(i, builder); - // was_modified = was_modified || rewrite_cts_cfs(i, builder); -// was_modified = was_modified || rewrite_trans_phi(i, builder); - was_modified = was_modified || rewrite_insert_extract(i, builder); - was_modified = was_modified || rewrite_unit_red(i, builder); - was_modified = was_modified || rewrite_gep_ptr_min_off_plus_off(i, builder); - // TODO: DOESN'T WORK FOR VECTORIZED MASKED LOAD -// was_modified = was_modified || rewrite_select_masked_load(i, builder); - was_modified = was_modified || rewrite_cvt_layout(i, builder); - if(tgt_->as_nvidia() && tgt_->as_nvidia()->sm() >= 80) - was_modified = was_modified || rewrite_load_to_shared(i, builder); - if(was_modified) - seen.insert(i); - } - }while(seen.size() != n_seen); -} - -} -} -} diff --git a/lib/codegen/transform/pipeline.cc b/lib/codegen/transform/pipeline.cc deleted file mode 100644 index 0c5c0b292c50..000000000000 --- a/lib/codegen/transform/pipeline.cc +++ /dev/null @@ -1,331 +0,0 @@ -#include -#include -#include "triton/codegen/transform/pipeline.h" -#include "triton/ir/module.h" -#include "triton/ir/function.h" -#include "triton/ir/basic_block.h" -#include "triton/ir/instructions.h" -#include "triton/ir/utils.h" - -namespace triton { -namespace codegen{ -namespace transform{ - - -void recursive_deps(ir::value* v, ir::basic_block* block, std::vector& ret){ - ir::instruction* i = dynamic_cast(v); - if(!i || i->get_parent() != block) - return; - if(i->get_id()==ir::INST_PHI) - return; - ret.push_back(i); - for(ir::user* u: i->get_users()) - recursive_deps(u, block, ret); -} - -void get_induction_vars(ir::value* cond, std::set& phis) { - auto instr = dynamic_cast(cond); - for (auto op : instr->ops()) { - if (auto phi_op = dynamic_cast(op)) { - phis.insert(phi_op); - return; - } - if (dynamic_cast(op)) - get_induction_vars(op, phis); - } -} - -/// assume incoming block is 1 -ir::value* rematerialize_vals(ir::builder& builder, ir::basic_block* block, ir::value* v, - std::map& prev_phi_vals) { - ir::instruction* i = dynamic_cast(v); - if(!i || i->get_parent() != block) - return v; - if(ir::phi_node* phi = dynamic_cast(v)) { - if (prev_phi_vals.find(phi) == prev_phi_vals.end()) - throw std::runtime_error("Don't have that phi node\n"); - return prev_phi_vals.at(phi); - } - - std::vector new_ops; - for(ir::value* op: i->ops()){ - 
new_ops.push_back(rematerialize_vals(builder, block, op, prev_phi_vals)); - } - ir::instruction* ret = i->clone(); - for(size_t k = 0; k < new_ops.size(); k++) - ret->set_operand(k, new_ops[k]); - builder.insert(ret); - return ret; -} - -ir::value* rematerialize(ir::builder& builder, ir::basic_block* block, - ir::value* v, size_t phi_idx){ - ir::instruction* i = dynamic_cast(v); - if(!i || i->get_parent() != block) - return v; - if(ir::phi_node* phi = dynamic_cast(v)) - return phi->get_incoming_value(phi_idx); - - std::vector new_ops; - for(ir::value* op: i->ops()){ - new_ops.push_back(rematerialize(builder, block, op, phi_idx)); - } - ir::instruction* ret = i->clone(); - for(size_t k = 0; k < new_ops.size(); k++) - ret->set_operand(k, new_ops[k]); - builder.insert(ret); - return ret; -} - -/// moving the prev phi vals to the next iteration -std::map update_prev_phi_vals( - ir::builder& builder, ir::basic_block* block, std::map& prev_phi_vals) { - std::map next_phi_vals; - for (auto &[phi, val] : prev_phi_vals) { - next_phi_vals[phi] = rematerialize_vals(builder, block, phi->get_incoming_value(1), prev_phi_vals); - } - return next_phi_vals; -} - -void finalize_iv_vals(ir::builder& builder, ir::basic_block* block, std::map& load_ivs, - std::map& next_load_ivs) { - for (auto& [phi, val] : load_ivs) { - if (auto new_phi = dynamic_cast(val)) { - ir::value* next_k = rematerialize_vals(builder, block, phi->get_incoming_value(1), load_ivs); - assert(new_phi->get_num_operands() == 1 && "should be incomplete phi"); - new_phi->add_incoming(next_k, phi->get_incoming_block(1)); - // cache next_k (to be used by next_mask) - next_load_ivs[phi] = next_k; - } else - throw std::runtime_error("must be phi"); - } -} - -struct pipeline_info_t { - ir::load_inst* load; - ir::phi_node* ptr; - ir::dot_inst* dot; - - pipeline_info_t(ir::load_inst* load, ir::phi_node* ptr, ir::dot_inst* dot) - : load(load), ptr(ptr), dot(dot) {} -}; - -void pipeline::run(ir::module &mod) { - if (num_stages_ <= 1) - return; - // *Very* conservative heuristics for pre-fetching. - // A load instruction can be pipelined if: - // - the pointer is a phi node that references a value - // in its basic block (i.e., pointer induction variable) - // - the load has only a single use in a dot instruction - // As more use cases become apparent, this pass will be improved - std::vector to_pipeline; - ir::for_each_instruction(mod, [&](ir::instruction *i){ - if(auto* load = dynamic_cast(i)){ - ir::phi_node* ptr = dynamic_cast(load->get_pointer_operand()); - auto users = load->get_users(); - auto dot = dynamic_cast(*users.begin()); - if(ptr && ptr->get_incoming_block(1) == ptr->get_parent() - && users.size() == 1 && dot) - to_pipeline.push_back({load, ptr, dot}); - }}); - // do the pipelining - std::vector new_loads; - ir::builder &builder = mod.get_builder(); - const int num_stages = num_stages_; - std::vector>> preheader_loads; // Used to reorder loads - - for(auto info: to_pipeline){ - ir::load_inst* load = info.load; - ir::phi_node* ptr = info.ptr; - ir::basic_block* block = load->get_parent(); - ir::basic_block* header = block->get_predecessors()[0]; - auto* block_br = dynamic_cast(block->get_inst_list().back()); - auto* header_br = dynamic_cast(header->get_inst_list().back()); - assert(block_br); - assert(header_br); - ir::type* ty = load->get_type(); - // multi-stage pipe - if (has_copy_async_ && num_stages > 2) { - ir::value* header_cond = header_br->get_cond(); - ir::value* block_cond = block_br->get_cond(); - // 1. 
collect induction variables - std::set induction_vars; - get_induction_vars(block_cond, induction_vars); - - std::vector first_ptrs(num_stages-1); - std::vector first_loads(num_stages-1); - std::vector first_masks(num_stages-1); - std::vector loop_conds(num_stages-1); - - std::map prev_phi_vals; - // initialize prev_phi_vals - // Add all phi nodes. The following DCE pass will delete dead ones. - for (ir::instruction *instr : block->get_inst_list()) - if (auto *phi = dynamic_cast(instr)) - if (phi->get_incoming_block(1) == block) - prev_phi_vals[phi] = phi->get_value_for_block(header); - - builder.set_insert_point(header->get_inst_list().back()); - first_ptrs[0] = ptr->get_value_for_block(header); - loop_conds[0] = header_cond; - first_masks[0] = builder.create_splat(loop_conds[0], ty->get_block_shapes()); - ir::value* false_value = nullptr; - if (auto* masked_load = dynamic_cast(load)) { - ir::value* remat_mask =rematerialize_vals(builder, block, masked_load->get_mask_operand(), prev_phi_vals) ; - ir::value* remat_false_value = - rematerialize_vals(builder, block, masked_load->get_false_value_operand(), prev_phi_vals); - first_masks[0] = builder.create_and(first_masks[0], remat_mask); - false_value = remat_false_value; - } else - false_value = builder.create_splat(ir::undef_value::get(ty->get_scalar_ty()), ty->get_block_shapes()); - first_loads[0] = builder.create_masked_load(first_ptrs[0], first_masks[0], false_value, load->get_cache_modifier(), load->get_eviction_policy(), load->get_is_volatile()); - - for (int stage = 1; stage < num_stages-1; ++stage) { - // mask is the loop condition of the previous iteration - loop_conds[stage] = rematerialize_vals(builder, block, block_cond, prev_phi_vals); - prev_phi_vals = update_prev_phi_vals(builder, block, prev_phi_vals); - first_ptrs[stage] = rematerialize_vals(builder, block, ptr, prev_phi_vals); - first_masks[stage] = builder.create_splat(loop_conds[stage], ty->get_block_shapes()); - if (auto* masked_load = dynamic_cast(load)) { - ir::value* remat_mask = rematerialize_vals(builder, block, masked_load->get_mask_operand(), prev_phi_vals); - ir::value* remat_false_value = - rematerialize_vals(builder, block, masked_load->get_false_value_operand(), prev_phi_vals); - first_masks[stage] = builder.create_and(first_masks[stage], remat_mask); - false_value = remat_false_value; - } - first_loads[stage] = builder.create_masked_load(first_ptrs[stage], first_masks[stage], false_value, load->get_cache_modifier(), load->get_eviction_policy(), load->get_is_volatile()); - } - - // create new phis for induction variables - builder.set_insert_point(block->get_first_non_phi()); - std::map load_ivs; - std::map next_load_ivs; - for (auto& [iv, val] : prev_phi_vals) { - ir::phi_node* pn = builder.create_phi(iv->get_type(), 2); - pn->add_incoming(prev_phi_vals[iv], header); - load_ivs[iv] = pn; - } - // add incoming for phis & update next_load_ivs - finalize_iv_vals(builder, block, load_ivs, next_load_ivs); - - // pre-fetch next iteration - builder.set_insert_point(block->get_inst_list().back()); -// ir::value* next_ptr = ptr->get_value_for_block(block); - ir::value* next_ptr = rematerialize_vals(builder, block, ptr->get_value_for_block(block), load_ivs); - ir::value* next_mask = builder.create_splat( - rematerialize_vals(builder, block, block_cond, load_ivs), ty->get_block_shapes()); - if (auto* masked_load = dynamic_cast(load)) { - ir::value* remat_mask = rematerialize_vals(builder, block, masked_load->get_mask_operand(), next_load_ivs); - // TODO: false may 
depends on some other phi nodes - ir::value* remat_false_value = - rematerialize_vals(builder, block, masked_load->get_false_value_operand(), next_load_ivs); - next_mask = builder.create_and(next_mask, remat_mask); - false_value = remat_false_value; - } - ir::value* next_load = builder.create_masked_load(next_ptr, next_mask, false_value, load->get_cache_modifier(), load->get_eviction_policy(), load->get_is_volatile()); - - - // phi node - ptr->set_incoming_value(0, first_ptrs.back()); - builder.set_insert_point(block->get_first_non_phi()); - // nested phis for load - std::vector new_load_phis(num_stages-1); - for (auto& pn : new_load_phis) - pn = builder.create_phi(ty, 2); - for (int i=0; iadd_incoming(first_loads[i], header); - new_load_phis[i]->add_incoming(new_load_phis[i+1], block); - } - new_load_phis.back()->add_incoming(first_loads.back(), header); - new_load_phis.back()->add_incoming(next_load, block); - load->replace_all_uses_with(new_load_phis.front()); - new_loads.push_back(new_load_phis.back()); - - // record first_loads to reorder them - preheader_loads.push_back({new_load_phis.front(), first_loads}); - } else { - // pre-fetch first iteration - builder.set_insert_point(header->get_inst_list().back()); - ir::value* first_ptr = ptr->get_value_for_block(header); - ir::value* first_mask = builder.create_splat(header_br->get_cond(), ty->get_block_shapes()); - ir::value* false_value; - if(auto* masked_load = dynamic_cast(load)){ - ir::value* remat_mask = rematerialize(builder, block, masked_load->get_mask_operand(), 0); - ir::value* remat_false_value = rematerialize(builder, block, masked_load->get_false_value_operand(), 0); - first_mask = builder.create_and(first_mask, remat_mask); - false_value = remat_false_value; - } - else - false_value = builder.create_splat(ir::undef_value::get(ty->get_scalar_ty()), ty->get_block_shapes()); - ir::value* first_load = builder.create_masked_load(first_ptr, first_mask, false_value, load->get_cache_modifier(), load->get_eviction_policy(), load->get_is_volatile()); - // pre-fetch next iteration - builder.set_insert_point(block->get_inst_list().back()); - ir::value* next_ptr = ptr->get_value_for_block(block); - ir::value* next_mask = builder.create_splat(block_br->get_cond(), ty->get_block_shapes()); - if(auto* masked_load = dynamic_cast(load)){ - ir::value* remat_mask = rematerialize(builder, block, masked_load->get_mask_operand(), 1); - ir::value* remat_false_value = rematerialize(builder, block, masked_load->get_false_value_operand(), 1); - next_mask = builder.create_and(next_mask, remat_mask); - false_value = remat_false_value; - } - ir::value* next_load = builder.create_masked_load(next_ptr, next_mask, false_value, load->get_cache_modifier(), load->get_eviction_policy(), load->get_is_volatile()); - // phi node - builder.set_insert_point(block->get_first_non_phi()); - ir::phi_node* new_load = builder.create_phi(ty, 2); - new_load->add_incoming(first_load, header); - new_load->add_incoming(next_load, block); - load->replace_all_uses_with(new_load); - new_loads.push_back(new_load); - } - } - - // try to reorder prefetched value from a0, a1, a2, ..., b0, b1, b2, ... to - // a0, b0, a1, b1, ... 
- if (!preheader_loads.empty()) { - ir::basic_block* header = preheader_loads.begin()->first->get_incoming_block(0); - builder.set_insert_point(header->get_inst_list().back()); - for (int i=1; i(iter->second.at(i)); - ir::instruction* moved_load = original_load->clone(); - builder.insert(moved_load); - original_load->replace_all_uses_with(moved_load); - } - } - } - - // try to move dot_inst after loads - // for better overlap of io and compute - struct move_config_t{ - std::vector insts; - ir::load_inst* dst; - }; - std::vector to_move(to_pipeline.size()); - - if(has_copy_async_){ - for (size_t idx = 0; idx < to_pipeline.size(); ++idx) { - auto info = to_pipeline[idx]; - ir::load_inst* load = info.load; - ir::phi_node* ptr = info.ptr; - ir::dot_inst* dot = info.dot; - ir::basic_block* bb = dot->get_parent(); - recursive_deps(dot, bb, to_move[idx].insts); - to_move[idx].dst = load; - } - - for(auto& move_config: to_move){ - builder.set_insert_point_after(move_config.dst); - for(ir::instruction* i: move_config.insts){ - i->get_parent()->erase(i); - builder.insert(i); - } - } - } - - -} - -} -} -} diff --git a/lib/codegen/transform/prefetch.cc b/lib/codegen/transform/prefetch.cc deleted file mode 100644 index 30b2a10f2718..000000000000 --- a/lib/codegen/transform/prefetch.cc +++ /dev/null @@ -1,133 +0,0 @@ -#include "triton/codegen/transform/prefetch.h" -#include "triton/codegen/target.h" -#include "triton/ir/module.h" -#include "triton/ir/function.h" -#include "triton/ir/basic_block.h" -#include "triton/ir/instructions.h" -#include "triton/ir/utils.h" -#include "triton/ir/print.h" -#include -#include -#include - -namespace triton::codegen::transform { - -/// find defs till phis -static void recursive_defs(ir::value *v, ir::basic_block *bb, std::vector &ret) { - ir::instruction *i = dynamic_cast(v); - if (!i || i->get_parent() != bb) - return; - if (i->get_id() == ir::INST_PHI) - return; - ret.push_back(i); - for (ir::value *op : i->ops()) - recursive_defs(op, bb, ret); -} - -void prefetch::run(ir::module &mod) { - // 1. collect dots that can be prefethced - std::vector to_prefetch; - ir::for_each_instruction(mod, [&](ir::instruction *i) { - if (auto *dot = dynamic_cast(i)) { - // Now only do prefetching when dot is using tensor cores - if (!(dot->get_operand(0)->get_type()->get_scalar_ty()->is_fp16_ty() || - dot->get_operand(0)->get_type()->get_scalar_ty()->is_bf16_ty() || - (dot->get_operand(0)->get_type()->get_scalar_ty()->is_fp32_ty() && dot->allow_tf32() - && tgt_->as_nvidia() && tgt_->as_nvidia()->sm() >= 80) || - (dot->get_operand(0)->get_type()->get_scalar_ty()->is_integer_ty(8) - && dot->get_operand(1)->get_type()->get_scalar_ty()->is_integer_ty(8) - && tgt_->as_nvidia() && tgt_->as_nvidia()->sm() >= 80) - ) - ) - return; - auto *a = dynamic_cast(dot->get_operand(0)); - auto *b = dynamic_cast(dot->get_operand(1)); - if (a && a->get_incoming_block(1) == a->get_parent() && - b && b->get_incoming_block(1) == b->get_parent()) - to_prefetch.push_back(dot); - } - }); - - assert(to_prefetch.size() <=1 && "Don't know what to do with multiple dots"); - ir::builder &builder = mod.get_builder(); - // 2. do the prefetching - for (ir::dot_inst* dot : to_prefetch) { - auto *a = dynamic_cast(dot->get_operand(0)); - auto *b = dynamic_cast(dot->get_operand(1)); - assert(a->get_incoming_block(0) == b->get_incoming_block(0)); - ir::basic_block *loop_header = a->get_incoming_block(0); - ir::basic_block *loop_body = a->get_parent(); - - // mark as prefetched - dot->set_prefetched(true); - - // 1. 
in the loop header (first iteration) - builder.set_insert_point(loop_header->get_inst_list().back()); - assert(a && b); - builder.create_prefetch_s(a->get_incoming_value(0), /*inc*/ 0); - builder.create_prefetch_s(b->get_incoming_value(0), /*inc*/ 0); - - // 2. at the end of the loop body (next iteration) - builder.set_insert_point(loop_body->get_inst_list().back()); - builder.create_prefetch_s(a->get_incoming_value(1), /*inc*/ 1); - builder.create_prefetch_s(b->get_incoming_value(1), /*inc*/ 1); - - prefetched_vals_.insert(a->get_incoming_value(0)); - prefetched_vals_.insert(b->get_incoming_value(0)); - // nested phis - ir::value* next_a = a->get_incoming_value(1); - while (auto* next_a_phi = dynamic_cast(next_a)) { - prefetched_vals_.insert(next_a_phi->get_incoming_value(0)); - next_a = next_a_phi->get_incoming_value(1); - } - prefetched_vals_.insert(next_a); - - ir::value* next_b = b->get_incoming_value(1); - while (auto* next_b_phi = dynamic_cast(next_b)) { - prefetched_vals_.insert(next_b_phi->get_incoming_value(0)); - next_b = next_b_phi->get_incoming_value(1); - } - prefetched_vals_.insert(next_b); - } - - // move loads to the beginning of the loop - if (tgt_->as_nvidia() && tgt_->as_nvidia()->sm() < 80) { - for (ir::function *fn : mod.get_function_list()) - for (ir::basic_block *bb : fn->blocks()) { - // only apply to loop body - if (bb->get_predecessors().size() != 2 || bb->get_predecessors()[1] != bb) - continue; - // record loads (& dependency) to move - std::vector loads; - // record original inst order - std::map idx_map; - size_t idx = 0; - for (ir::instruction *inst : bb->get_inst_list()) { - if (auto *i = dynamic_cast(inst)) - recursive_defs(i, bb, loads); - idx_map[inst] = idx; - idx++; - } - - // remove duplicates & keep the original input order - std::sort(loads.begin(), loads.end()); - loads.erase(std::unique(loads.begin(), loads.end()), loads.end()); - std::sort(loads.begin(), loads.end(), [&idx_map](ir::instruction *a, ir::instruction *b) { - return idx_map[a] < idx_map[b]; - }); - - builder.set_insert_point(bb->get_first_non_phi()); - auto& inst_list = bb->get_inst_list(); - for (ir::instruction *i : loads){ - auto it = std::find(inst_list.begin(), inst_list.end(), i); - // make sure we don't invalidate insert point - // in case instruction already at the top - if(it == builder.get_insert_point()) - continue; - bb->erase(i); - builder.insert(i); - } - } - } -} -} // namespace triton::codegen::transform diff --git a/lib/codegen/transform/reorder.cc b/lib/codegen/transform/reorder.cc deleted file mode 100644 index 47dc47b6c24f..000000000000 --- a/lib/codegen/transform/reorder.cc +++ /dev/null @@ -1,51 +0,0 @@ -#include -#include -#include "triton/ir/module.h" -#include "triton/ir/function.h" -#include "triton/ir/basic_block.h" -#include "triton/ir/instructions.h" -#include "triton/codegen/transform/reorder.h" - -namespace triton { -namespace codegen{ -namespace transform{ - -void reorder::run(ir::module& mod){ -// ir::builder &builder = mod.get_builder(); -// std::vector> to_replace; - -// for(ir::function *fn: mod.get_function_list()) -// for(ir::basic_block *block: fn->blocks()) -// for(ir::instruction* i: block->get_inst_list()){ -// if(auto* ld = dynamic_cast(i)){ -// ir::value* _ptr = ld->get_pointer_operand(); -// ir::value* _msk = ld->get_mask_operand(); -// ir::value* _val = ld->get_false_value_operand(); -// auto ptr = std::find(block->begin(), block->end(), _ptr); -// auto msk = std::find(block->begin(), block->end(), _msk); -// auto val = 
std::find(block->begin(), block->end(), _val); -// if(ptr == block->end() || msk == block->end() || val == block->end()) -// continue; -// auto it = std::find(block->begin(), block->end(), i); -// int dist_ptr = std::distance(ptr, it); -// int dist_msk = std::distance(msk, it); -// int dist_val = std::distance(val, it); -// if(dist_ptr < dist_msk && dist_ptr < dist_val) -// builder.set_insert_point(++ptr); -// if(dist_msk < dist_ptr && dist_msk < dist_val) -// builder.set_insert_point(++msk); -// if(dist_val < dist_ptr && dist_val < dist_msk) -// builder.set_insert_point(++val); -// ir::value* new_ld = builder.create_masked_load(_ptr, _msk, _val); -// to_replace.push_back(std::make_pair(ld, new_ld)); -// } -// } - -// for(auto& x: to_replace) -// x.first->replace_all_uses_with(x.second); - -} - -} -} -} diff --git a/lib/driver/dispatch.cc b/lib/driver/dispatch.cc deleted file mode 100755 index de6f1901b83f..000000000000 --- a/lib/driver/dispatch.cc +++ /dev/null @@ -1,302 +0,0 @@ -/* Copyright 2015-2017 Philippe Tillet -* -* Permission is hereby granted, free of charge, to any person obtaining -* a copy of this software and associated documentation files -* (the "Software"), to deal in the Software without restriction, -* including without limitation the rights to use, copy, modify, merge, -* publish, distribute, sublicense, and/or sell copies of the Software, -* and to permit persons to whom the Software is furnished to do so, -* subject to the following conditions: -* -* The above copyright notice and this permission notice shall be -* included in all copies or substantial portions of the Software. -* -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-*/ - -#include "triton/driver/dispatch.h" - -namespace triton -{ -namespace driver -{ - -//Helpers for function definition -#define DEFINE0(init, hlib, ret, fname) ret dispatch::fname()\ -{return f_impl(hlib, fname, fname ## _, #fname); }\ -void* dispatch::fname ## _; - -#define DEFINE1(init, hlib, ret, fname, t1) ret dispatch::fname(t1 a)\ -{return f_impl(hlib, fname, fname ## _, #fname, a); }\ -void* dispatch::fname ## _; - -#define DEFINE2(init, hlib, ret, fname, t1, t2) ret dispatch::fname(t1 a, t2 b)\ -{return f_impl(hlib, fname, fname ## _, #fname, a, b); }\ -void* dispatch::fname ## _; - -#define DEFINE3(init, hlib, ret, fname, t1, t2, t3) ret dispatch::fname(t1 a, t2 b, t3 c)\ -{return f_impl(hlib, fname, fname ## _, #fname, a, b, c); }\ -void* dispatch::fname ## _; - -#define DEFINE4(init, hlib, ret, fname, t1, t2, t3, t4) ret dispatch::fname(t1 a, t2 b, t3 c, t4 d)\ -{return f_impl(hlib, fname, fname ## _, #fname, a, b, c, d); }\ -void* dispatch::fname ## _; - -#define DEFINE5(init, hlib, ret, fname, t1, t2, t3, t4, t5) ret dispatch::fname(t1 a, t2 b, t3 c, t4 d, t5 e)\ -{return f_impl(hlib, fname, fname ## _, #fname, a, b, c, d, e); }\ -void* dispatch::fname ## _; - -#define DEFINE6(init, hlib, ret, fname, t1, t2, t3, t4, t5, t6) ret dispatch::fname(t1 a, t2 b, t3 c, t4 d, t5 e, t6 f)\ -{return f_impl(hlib, fname, fname ## _, #fname, a, b, c, d, e, f); }\ -void* dispatch::fname ## _; - -#define DEFINE7(init, hlib, ret, fname, t1, t2, t3, t4, t5, t6, t7) ret dispatch::fname(t1 a, t2 b, t3 c, t4 d, t5 e, t6 f, t7 g)\ -{return f_impl(hlib, fname, fname ## _, #fname, a, b, c, d, e, f, g); }\ -void* dispatch::fname ## _; - -#define DEFINE8(init, hlib, ret, fname, t1, t2, t3, t4, t5, t6, t7, t8) ret dispatch::fname(t1 a, t2 b, t3 c, t4 d, t5 e, t6 f, t7 g, t8 h)\ -{return f_impl(hlib, fname, fname ## _, #fname, a, b, c, d, e, f, g, h); }\ -void* dispatch::fname ## _; - -#define DEFINE9(init, hlib, ret, fname, t1, t2, t3, t4, t5, t6, t7, t8, t9) ret dispatch::fname(t1 a, t2 b, t3 c, t4 d, t5 e, t6 f, t7 g, t8 h, t9 i)\ -{return f_impl(hlib, fname, fname ## _, #fname, a, b, c, d, e, f, g, h, i); }\ -void* dispatch::fname ## _; - -#define DEFINE10(init, hlib, ret, fname, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10) ret dispatch::fname(t1 a, t2 b, t3 c, t4 d, t5 e, t6 f, t7 g, t8 h, t9 i, t10 j)\ -{return f_impl(hlib, fname, fname ## _, #fname, a, b, c, d, e, f, g, h, i, j); }\ -void* dispatch::fname ## _; - -#define DEFINE11(init, hlib, ret, fname, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11) ret dispatch::fname(t1 a, t2 b, t3 c, t4 d, t5 e, t6 f, t7 g, t8 h, t9 i, t10 j, t11 k)\ -{return f_impl(hlib, fname, fname ## _, #fname, a, b, c, d, e, f, g, h, i, j, k); }\ -void* dispatch::fname ## _; - -#define DEFINE13(init, hlib, ret, fname, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13) ret dispatch::fname(t1 a, t2 b, t3 c, t4 d, t5 e, t6 f, t7 g, t8 h, t9 i, t10 j, t11 k, t12 l, t13 m)\ -{return f_impl(hlib, fname, fname ## _, #fname, a, b, c, d, e, f, g, h, i, j, k, l, m); }\ -void* dispatch::fname ## _; - -#define DEFINE19(init, hlib, ret, fname, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11, t12, t13, t14, t15, t16, t17, t18, t19) ret dispatch::fname(t1 a, t2 b, t3 c, t4 d, t5 e, t6 f, t7 g, t8 h, t9 i, t10 j, t11 k, t12 l, t13 m, t14 n, t15 o, t16 p, t17 q, t18 r, t19 s)\ -{return f_impl(hlib, fname, fname ## _, #fname, a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s); }\ -void* dispatch::fname ## _; - - -/* ------------------- * - * CUDA - * ------------------- */ - -bool 
dispatch::cuinit(){ - if(cuda_==nullptr){ - #ifdef _WIN32 - cuda_ = dlopen("cudart64_110.dll", RTLD_LAZY); - #else - cuda_ = dlopen("libcuda.so", RTLD_LAZY); - if(!cuda_) - cuda_ = dlopen("libcuda.so.1", RTLD_LAZY); - #endif - if(!cuda_) - throw std::runtime_error("Could not find `libcuda.so`. Make sure it is in your LD_LIBRARY_PATH."); - } - if(cuda_ == nullptr) - return false; - CUresult (*fptr)(unsigned int); - cuInit_ = dlsym(cuda_, "cuInit"); - *reinterpret_cast(&fptr) = cuInit_; - CUresult res = (*fptr)(0); - check(res); - return true; -} - -#define CUDA_DEFINE1(ret, fname, t1) DEFINE1(cuinit, cuda_, ret, fname, t1) -#define CUDA_DEFINE2(ret, fname, t1, t2) DEFINE2(cuinit, cuda_, ret, fname, t1, t2) -#define CUDA_DEFINE3(ret, fname, t1, t2, t3) DEFINE3(cuinit, cuda_, ret, fname, t1, t2, t3) -#define CUDA_DEFINE4(ret, fname, t1, t2, t3, t4) DEFINE4(cuinit, cuda_, ret, fname, t1, t2, t3, t4) -#define CUDA_DEFINE5(ret, fname, t1, t2, t3, t4, t5) DEFINE5(cuinit, cuda_, ret, fname, t1, t2, t3, t4, t5) -#define CUDA_DEFINE6(ret, fname, t1, t2, t3, t4, t5, t6) DEFINE6(cuinit, cuda_, ret, fname, t1, t2, t3, t4, t5, t6) -#define CUDA_DEFINE7(ret, fname, t1, t2, t3, t4, t5, t6, t7) DEFINE7(cuinit, cuda_, ret, fname, t1, t2, t3, t4, t5, t6, t7) -#define CUDA_DEFINE8(ret, fname, t1, t2, t3, t4, t5, t6, t7, t8) DEFINE8(cuinit, cuda_, ret, fname, t1, t2, t3, t4, t5, t6, t7, t8) -#define CUDA_DEFINE9(ret, fname, t1, t2, t3, t4, t5, t6, t7, t8, t9) DEFINE9(cuinit, cuda_, ret, fname, t1, t2, t3, t4, t5, t6, t7, t8, t9) -#define CUDA_DEFINE10(ret, fname, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10) DEFINE10(cuinit, cuda_, ret, fname, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10) -#define CUDA_DEFINE11(ret, fname, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11) DEFINE11(cuinit, cuda_, ret, fname, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11) - -// context management -CUDA_DEFINE1(CUresult, cuCtxDestroy_v2, CUcontext) -CUDA_DEFINE3(CUresult, cuCtxCreate_v2, CUcontext *, unsigned int, CUdevice) -CUDA_DEFINE1(CUresult, cuCtxGetDevice, CUdevice*) -CUDA_DEFINE2(CUresult, cuCtxEnablePeerAccess, CUcontext, unsigned int) -CUDA_DEFINE1(CUresult, cuInit, unsigned int) -CUDA_DEFINE1(CUresult, cuDriverGetVersion, int *) -// device management -CUDA_DEFINE2(CUresult, cuDeviceGet, CUdevice *, int) -CUDA_DEFINE3(CUresult, cuDeviceGetName, char *, int, CUdevice) -CUDA_DEFINE3(CUresult, cuDeviceGetPCIBusId, char *, int, CUdevice) -CUDA_DEFINE3(CUresult, cuDeviceGetAttribute, int *, CUdevice_attribute, CUdevice) -CUDA_DEFINE1(CUresult, cuDeviceGetCount, int*) - -// link management -CUDA_DEFINE6(CUresult, cuLinkAddFile_v2, CUlinkState, CUjitInputType, const char *, unsigned int , CUjit_option *, void **); -CUDA_DEFINE8(CUresult, cuLinkAddData_v2, CUlinkState, CUjitInputType, void*, size_t, const char*, unsigned int, CUjit_option*, void**); -CUDA_DEFINE4(CUresult, cuLinkCreate_v2, unsigned int, CUjit_option*, void**, CUlinkState*); -CUDA_DEFINE1(CUresult, cuLinkDestroy, CUlinkState); -CUDA_DEFINE3(CUresult, cuLinkComplete, CUlinkState, void**, size_t*); -// module management -CUDA_DEFINE4(CUresult, cuModuleGetGlobal_v2, CUdeviceptr*, size_t*, CUmodule, const char*) -CUDA_DEFINE2(CUresult, cuModuleLoad, CUmodule *, const char *) -CUDA_DEFINE1(CUresult, cuModuleUnload, CUmodule) -CUDA_DEFINE2(CUresult, cuModuleLoadData, CUmodule *, const void *) -CUDA_DEFINE5(CUresult, cuModuleLoadDataEx, CUmodule *, const void *, unsigned int, CUjit_option *, void **) -CUDA_DEFINE3(CUresult, cuModuleGetFunction, CUfunction *, CUmodule, const char *) 
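// Illustrative sketch of the lazy-binding pattern the CUDA_DEFINEn macros above rely
// on: the driver library is dlopen()'d on first use and each entry point is resolved
// with dlsym() once, then cached in a typed function pointer. Names below (cuda_lib,
// resolve, my_cuInit) are hypothetical, and the CUDA driver header plus -ldl are
// assumed to be available; this is a minimal example, not the dispatcher itself.
#include <dlfcn.h>
#include <stdexcept>
#include <string>
#include <cuda.h>

static void *cuda_lib() {
  // Load the driver once; subsequent calls reuse the cached handle.
  static void *handle = [] {
    void *h = dlopen("libcuda.so", RTLD_LAZY);
    if (!h) h = dlopen("libcuda.so.1", RTLD_LAZY);
    if (!h) throw std::runtime_error("could not load libcuda.so; check LD_LIBRARY_PATH");
    return h;
  }();
  return handle;
}

template <typename Fn>
static Fn resolve(const char *name) {
  // Resolve a driver symbol by name and cast it to the expected signature.
  void *sym = dlsym(cuda_lib(), name);
  if (!sym) throw std::runtime_error(std::string("missing driver symbol: ") + name);
  return reinterpret_cast<Fn>(sym);
}

static CUresult my_cuInit(unsigned int flags) {
  // One thin wrapper per entry point, in the spirit of the macro expansions above.
  static auto fn = resolve<CUresult (*)(unsigned int)>("cuInit");
  return fn(flags);
}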
-// stream management -CUDA_DEFINE2(CUresult, cuStreamCreate, CUstream *, unsigned int) -CUDA_DEFINE1(CUresult, cuStreamSynchronize, CUstream) -CUDA_DEFINE1(CUresult, cuStreamDestroy_v2, CUstream) -CUDA_DEFINE2(CUresult, cuStreamGetCtx, CUstream, CUcontext*) -CUDA_DEFINE11(CUresult, cuLaunchKernel, CUfunction, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, CUstream, void **, void **) -// function management -CUDA_DEFINE3(CUresult, cuFuncGetAttribute, int*, CUfunction_attribute, CUfunction) -CUDA_DEFINE3(CUresult, cuFuncSetAttribute, CUfunction, CUfunction_attribute, int) -CUDA_DEFINE2(CUresult, cuFuncSetCacheConfig, CUfunction, CUfunc_cache) -// memory management -CUDA_DEFINE3(CUresult, cuMemcpyDtoH_v2, void *, CUdeviceptr, size_t) -CUDA_DEFINE1(CUresult, cuMemFree_v2, CUdeviceptr) -CUDA_DEFINE4(CUresult, cuMemcpyDtoHAsync_v2, void *, CUdeviceptr, size_t, CUstream) -CUDA_DEFINE4(CUresult, cuMemcpyHtoDAsync_v2, CUdeviceptr, const void *, size_t, CUstream) -CUDA_DEFINE3(CUresult, cuMemcpyHtoD_v2, CUdeviceptr, const void *, size_t ) -CUDA_DEFINE2(CUresult, cuMemAlloc_v2, CUdeviceptr*, size_t) -CUDA_DEFINE3(CUresult, cuPointerGetAttribute, void*, CUpointer_attribute, CUdeviceptr) -CUDA_DEFINE4(CUresult, cuMemsetD8Async, CUdeviceptr, unsigned char, size_t, CUstream) -// event management -CUDA_DEFINE2(CUresult, cuEventCreate, CUevent *, unsigned int) -CUDA_DEFINE3(CUresult, cuEventElapsedTime, float *, CUevent, CUevent) -CUDA_DEFINE2(CUresult, cuEventRecord, CUevent, CUstream) -CUDA_DEFINE1(CUresult, cuEventDestroy_v2, CUevent) - - - -/* ------------------- * - * NVML - * ------------------- */ -bool dispatch::nvmlinit(){ - #ifdef _WIN32 - if(nvml_==nullptr) - nvml_ = dlopen("nvml.dll", RTLD_LAZY); - #else - if(nvml_==nullptr) - nvml_ = dlopen("libnvidia-ml.so", RTLD_LAZY); - #endif - nvmlReturn_t (*fptr)(); - nvmlInit_v2_ = dlsym(nvml_, "nvmlInit_v2"); - *reinterpret_cast(&fptr) = nvmlInit_v2_; - nvmlReturn_t res = (*fptr)(); - check(res); - return res; -} - -#define NVML_DEFINE0(ret, fname) DEFINE0(nvmlinit, nvml_, ret, fname) -#define NVML_DEFINE1(ret, fname, t1) DEFINE1(nvmlinit, nvml_, ret, fname, t1) -#define NVML_DEFINE2(ret, fname, t1, t2) DEFINE2(nvmlinit, nvml_, ret, fname, t1, t2) -#define NVML_DEFINE3(ret, fname, t1, t2, t3) DEFINE3(nvmlinit, nvml_, ret, fname, t1, t2, t3) - -NVML_DEFINE2(nvmlReturn_t, nvmlDeviceGetHandleByPciBusId_v2, const char *, nvmlDevice_t*) -NVML_DEFINE3(nvmlReturn_t, nvmlDeviceGetClockInfo, nvmlDevice_t, nvmlClockType_t, unsigned int*) -NVML_DEFINE3(nvmlReturn_t, nvmlDeviceGetMaxClockInfo, nvmlDevice_t, nvmlClockType_t, unsigned int*) -NVML_DEFINE3(nvmlReturn_t, nvmlDeviceSetApplicationsClocks, nvmlDevice_t, unsigned int, unsigned int) - -/* ------------------- * - * HIP - * ------------------- */ -bool dispatch::hipinit(){ - if(hip_==nullptr) - hip_ = dlopen("libamdhip64.so", RTLD_LAZY); - if(hip_ == nullptr) - return false; - hipError_t (*fptr)(); - hipInit_ = dlsym(hip_, "hipInit"); - *reinterpret_cast(&fptr) = hipInit_; - hipError_t res = (*fptr)(); - check(res); - return res; -} - -#define HIP_DEFINE1(ret, fname, t1) DEFINE1(hipinit, hip_, ret, fname, t1) -#define HIP_DEFINE2(ret, fname, t1, t2) DEFINE2(hipinit, hip_, ret, fname, t1, t2) -#define HIP_DEFINE3(ret, fname, t1, t2, t3) DEFINE3(hipinit, hip_, ret, fname, t1, t2, t3) -#define HIP_DEFINE4(ret, fname, t1, t2, t3, t4) DEFINE4(hipinit, hip_, ret, fname, t1, t2, t3, t4) -#define HIP_DEFINE5(ret, fname, t1, t2, t3, t4, t5) DEFINE5(hipinit, hip_, 
ret, fname, t1, t2, t3, t4, t5) -#define HIP_DEFINE6(ret, fname, t1, t2, t3, t4, t5, t6) DEFINE6(hipinit, hip_, ret, fname, t1, t2, t3, t4, t5, t6) -#define HIP_DEFINE7(ret, fname, t1, t2, t3, t4, t5, t6, t7) DEFINE7(hipinit, hip_, ret, fname, t1, t2, t3, t4, t5, t6, t7) -#define HIP_DEFINE8(ret, fname, t1, t2, t3, t4, t5, t6, t7, t8) DEFINE8(hipinit, hip_, ret, fname, t1, t2, t3, t4, t5, t6, t7, t8) -#define HIP_DEFINE9(ret, fname, t1, t2, t3, t4, t5, t6, t7, t8, t9) DEFINE9(hipinit, hip_, ret, fname, t1, t2, t3, t4, t5, t6, t7, t8, t9) -#define HIP_DEFINE10(ret, fname, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10) DEFINE10(hipinit, hip_, ret, fname, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10) -#define HIP_DEFINE11(ret, fname, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11) DEFINE11(hipinit, hip_, ret, fname, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11) - -// context management -HIP_DEFINE1(hipError_t, hipCtxDestroy, hipCtx_t) -HIP_DEFINE3(hipError_t, hipCtxCreate, hipCtx_t *, unsigned int, hipDevice_t) -HIP_DEFINE1(hipError_t, hipCtxGetDevice, hipDevice_t*) -HIP_DEFINE1(hipError_t, hipCtxPushCurrent, hipCtx_t) -HIP_DEFINE1(hipError_t, hipCtxPopCurrent, hipCtx_t*) -HIP_DEFINE2(hipError_t, hipCtxEnablePeerAccess, hipCtx_t, unsigned int) -HIP_DEFINE1(hipError_t, hipInit, unsigned int) -HIP_DEFINE1(hipError_t, hipDriverGetVersion, int *) -// device management -HIP_DEFINE2(hipError_t, hipGetDevice, hipDevice_t *, int) -HIP_DEFINE3(hipError_t, hipDeviceGetName, char *, int, hipDevice_t) -HIP_DEFINE3(hipError_t, hipDeviceGetPCIBusId, char *, int, hipDevice_t) -HIP_DEFINE3(hipError_t, hipDeviceGetAttribute, int *, hipDeviceAttribute_t, hipDevice_t) -HIP_DEFINE1(hipError_t, hipGetDeviceCount, int *) -// module management -HIP_DEFINE4(hipError_t, hipModuleGetGlobal, hipDeviceptr_t*, size_t*, hipModule_t, const char*) -HIP_DEFINE2(hipError_t, hipModuleLoad, hipModule_t *, const char *) -HIP_DEFINE1(hipError_t, hipModuleUnload, hipModule_t) -HIP_DEFINE2(hipError_t, hipModuleLoadData, hipModule_t *, const void *) -HIP_DEFINE5(hipError_t, hipModuleLoadDataEx, hipModule_t *, const void *, unsigned int, hipJitOption *, void **) -HIP_DEFINE3(hipError_t, hipModuleGetFunction, hipFunction_t *, hipModule_t, const char *) -// stream management -HIP_DEFINE2(hipError_t, hipStreamCreate, hipStream_t *, unsigned int) -HIP_DEFINE1(hipError_t, hipStreamSynchronize, hipStream_t) -HIP_DEFINE1(hipError_t, hipStreamDestroy, hipStream_t) -HIP_DEFINE11(hipError_t, hipModuleLaunchKernel, hipFunction_t, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, unsigned int, hipStream_t, void **, void **) -// function management -HIP_DEFINE2(hipError_t, hipFuncGetAttributes, hipFuncAttributes*, void*) -HIP_DEFINE2(hipError_t, hipFuncSetCacheConfig, hipFunction_t, hipFuncCache_t) -// memory management -HIP_DEFINE3(hipError_t, hipMemcpyDtoH, void *, hipDeviceptr_t, size_t) -HIP_DEFINE1(hipError_t, hipFree, hipDeviceptr_t) -HIP_DEFINE4(hipError_t, hipMemcpyDtoHAsync, void *, hipDeviceptr_t, size_t, hipStream_t) -HIP_DEFINE4(hipError_t, hipMemcpyHtoDAsync, hipDeviceptr_t, const void *, size_t, hipStream_t) -HIP_DEFINE3(hipError_t, hipMemcpyHtoD, hipDeviceptr_t, const void *, size_t ) -HIP_DEFINE2(hipError_t, hipMalloc, hipDeviceptr_t*, size_t) -HIP_DEFINE3(hipError_t, hipPointerGetAttribute, void*, CUpointer_attribute, hipDeviceptr_t) -HIP_DEFINE4(hipError_t, hipMemsetD8Async, hipDeviceptr_t, unsigned char, size_t, hipStream_t) -// event management -HIP_DEFINE2(hipError_t, hipEventCreate, hipEvent_t *, 
unsigned int) -HIP_DEFINE3(hipError_t, hipEventElapsedTime, float *, hipEvent_t, hipEvent_t) -HIP_DEFINE2(hipError_t, hipEventRecord, hipEvent_t, hipStream_t) -HIP_DEFINE1(hipError_t, hipEventDestroy, hipEvent_t) - - -/* ------------------- * - * COMMON - * ------------------- */ - -// Release -void dispatch::release(){ - if(cuda_){ - dlclose(cuda_); - cuda_ = nullptr; - } -} - -void* dispatch::cuda_; -void* dispatch::nvml_; -void* dispatch::nvmlInit_v2_; -void* dispatch::hip_; - - -} -} diff --git a/lib/driver/error.cc b/lib/driver/error.cc deleted file mode 100755 index fda2b7f33fba..000000000000 --- a/lib/driver/error.cc +++ /dev/null @@ -1,166 +0,0 @@ -/* Copyright 2015-2017 Philippe Tillet -* -* Permission is hereby granted, free of charge, to any person obtaining -* a copy of this software and associated documentation files -* (the "Software"), to deal in the Software without restriction, -* including without limitation the rights to use, copy, modify, merge, -* publish, distribute, sublicense, and/or sell copies of the Software, -* and to permit persons to whom the Software is furnished to do so, -* subject to the following conditions: -* -* The above copyright notice and this permission notice shall be -* included in all copies or substantial portions of the Software. -* -* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-*/ - -#include "triton/driver/error.h" - -namespace triton -{ -namespace driver -{ - -void check(CUresult err) -{ - using namespace exception::cuda; - switch(err) - { - case CUDA_SUCCESS : break; - case CUDA_ERROR_INVALID_VALUE : throw invalid_value(); - case CUDA_ERROR_OUT_OF_MEMORY : throw out_of_memory(); - case CUDA_ERROR_NOT_INITIALIZED : throw not_initialized(); - case CUDA_ERROR_DEINITIALIZED : throw deinitialized(); - case CUDA_ERROR_PROFILER_DISABLED : throw profiler_disabled(); - case CUDA_ERROR_PROFILER_NOT_INITIALIZED : throw profiler_not_initialized(); - case CUDA_ERROR_PROFILER_ALREADY_STARTED : throw profiler_already_started(); - case CUDA_ERROR_PROFILER_ALREADY_STOPPED : throw profiler_already_stopped(); - case CUDA_ERROR_NO_DEVICE : throw no_device(); - case CUDA_ERROR_INVALID_DEVICE : throw invalid_device(); - case CUDA_ERROR_INVALID_IMAGE : throw invalid_image(); - case CUDA_ERROR_INVALID_CONTEXT : throw invalid_context(); - case CUDA_ERROR_CONTEXT_ALREADY_CURRENT : throw context_already_current(); - case CUDA_ERROR_MAP_FAILED : throw map_failed(); - case CUDA_ERROR_UNMAP_FAILED : throw unmap_failed(); - case CUDA_ERROR_ARRAY_IS_MAPPED : throw array_is_mapped(); - case CUDA_ERROR_ALREADY_MAPPED : throw already_mapped(); - case CUDA_ERROR_NO_BINARY_FOR_GPU : throw no_binary_for_gpu(); - case CUDA_ERROR_ALREADY_ACQUIRED : throw already_acquired(); - case CUDA_ERROR_NOT_MAPPED : throw not_mapped(); - case CUDA_ERROR_NOT_MAPPED_AS_ARRAY : throw not_mapped_as_array(); - case CUDA_ERROR_NOT_MAPPED_AS_POINTER : throw not_mapped_as_pointer(); - case CUDA_ERROR_ECC_UNCORRECTABLE : throw ecc_uncorrectable(); - case CUDA_ERROR_UNSUPPORTED_LIMIT : throw unsupported_limit(); - case CUDA_ERROR_CONTEXT_ALREADY_IN_USE : throw context_already_in_use(); - case CUDA_ERROR_PEER_ACCESS_UNSUPPORTED : throw peer_access_unsupported(); - case CUDA_ERROR_INVALID_PTX : throw invalid_ptx(); - case CUDA_ERROR_INVALID_GRAPHICS_CONTEXT : throw invalid_graphics_context(); - case CUDA_ERROR_INVALID_SOURCE : throw invalid_source(); - case CUDA_ERROR_FILE_NOT_FOUND : throw file_not_found(); - case CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND : throw shared_object_symbol_not_found(); - case CUDA_ERROR_SHARED_OBJECT_INIT_FAILED : throw shared_object_init_failed(); - case CUDA_ERROR_OPERATING_SYSTEM : throw operating_system(); - case CUDA_ERROR_INVALID_HANDLE : throw invalid_handle(); - case CUDA_ERROR_NOT_FOUND : throw not_found(); - case CUDA_ERROR_NOT_READY : throw not_ready(); - case CUDA_ERROR_ILLEGAL_ADDRESS : throw illegal_address(); - case CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES : throw launch_out_of_resources(); - case CUDA_ERROR_LAUNCH_TIMEOUT : throw launch_timeout(); - case CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING : throw launch_incompatible_texturing(); - case CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED : throw peer_access_already_enabled(); - case CUDA_ERROR_PEER_ACCESS_NOT_ENABLED : throw peer_access_not_enabled(); - case CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE : throw primary_context_active(); - case CUDA_ERROR_CONTEXT_IS_DESTROYED : throw context_is_destroyed(); - case CUDA_ERROR_ASSERT : throw assert_error(); - case CUDA_ERROR_TOO_MANY_PEERS : throw too_many_peers(); - case CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED : throw host_memory_already_registered(); - case CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED : throw host_memory_not_registered(); - case CUDA_ERROR_HARDWARE_STACK_ERROR : throw hardware_stack_error(); - case CUDA_ERROR_ILLEGAL_INSTRUCTION : throw illegal_instruction(); - case 
CUDA_ERROR_MISALIGNED_ADDRESS : throw misaligned_address(); - case CUDA_ERROR_INVALID_ADDRESS_SPACE : throw invalid_address_space(); - case CUDA_ERROR_INVALID_PC : throw invalid_pc(); - case CUDA_ERROR_LAUNCH_FAILED : throw launch_failed(); - case CUDA_ERROR_NOT_PERMITTED : throw not_permitted(); - case CUDA_ERROR_NOT_SUPPORTED : throw not_supported(); - case CUDA_ERROR_UNKNOWN : throw unknown(); - default : throw std::runtime_error("unimplemented code: " + std::to_string(err)); - } -} - -void check(hipError_t error) { - using namespace exception::hip; - switch(error) - { - case hipSuccess : break; - case hipErrorInvalidValue : throw invalid_value(); - case hipErrorMemoryAllocation : throw out_of_memory(); - case hipErrorNotInitialized : throw not_initialized(); - case hipErrorDeinitialized : throw deinitialized(); - case hipErrorProfilerDisabled : throw profiler_disabled(); - case hipErrorProfilerNotInitialized : throw profiler_not_initialized(); - case hipErrorProfilerAlreadyStarted : throw profiler_already_started(); - case hipErrorProfilerAlreadyStopped : throw profiler_already_stopped(); - case hipErrorNoDevice : throw no_device(); - case hipErrorInvalidSymbol : throw invalid_symbol(); - case hipErrorInvalidDevice : throw invalid_device(); - case hipErrorInvalidImage : throw invalid_image(); - case hipErrorInvalidContext : throw invalid_context(); - case hipErrorContextAlreadyCurrent : throw context_already_current(); - case hipErrorMapFailed : throw map_failed(); - case hipErrorUnmapFailed : throw unmap_failed(); - case hipErrorArrayIsMapped : throw array_is_mapped(); - case hipErrorAlreadyMapped : throw already_mapped(); - case hipErrorNoBinaryForGpu : throw no_binary_for_gpu(); - case hipErrorAlreadyAcquired : throw already_acquired(); - case hipErrorNotMapped : throw not_mapped(); - case hipErrorNotMappedAsArray : throw not_mapped_as_array(); - case hipErrorNotMappedAsPointer : throw not_mapped_as_pointer(); - case hipErrorECCNotCorrectable : throw ecc_uncorrectable(); - case hipErrorUnsupportedLimit : throw unsupported_limit(); - case hipErrorContextAlreadyInUse : throw context_already_in_use(); - case hipErrorPeerAccessUnsupported : throw peer_access_unsupported(); - case hipErrorInvalidKernelFile : throw invalid_ptx(); - case hipErrorInvalidGraphicsContext : throw invalid_graphics_context(); - case hipErrorInvalidSource : throw invalid_source(); - case hipErrorFileNotFound : throw file_not_found(); - case hipErrorSharedObjectSymbolNotFound : throw shared_object_symbol_not_found(); - case hipErrorSharedObjectInitFailed : throw shared_object_init_failed(); - case hipErrorOperatingSystem : throw operating_system(); - case hipErrorInvalidResourceHandle : throw invalid_handle(); - case hipErrorNotFound : throw not_found(); - case hipErrorNotReady : throw not_ready(); - case hipErrorIllegalAddress : throw illegal_address(); - case hipErrorLaunchOutOfResources : throw launch_out_of_resources(); - case hipErrorLaunchTimeOut : throw launch_timeout(); - // case hipErrorLaunchIncompatibleTexturing : throw launch_incompatible_texturing(); - case hipErrorPeerAccessAlreadyEnabled : throw peer_access_already_enabled(); - case hipErrorPeerAccessNotEnabled : throw peer_access_not_enabled(); - // case hipErrorPrimaryContextActive : throw primary_context_active(); - // case hipErrorContextIsDestroyed : throw context_is_destroyed(); - case hipErrorAssert : throw assert_error(); - // case hipErrorTooManyPeers : throw too_many_peers(); - case hipErrorHostMemoryAlreadyRegistered : throw 
host_memory_already_registered(); - case hipErrorHostMemoryNotRegistered : throw host_memory_not_registered(); - // case hipErrorHardwareStackError : throw hardware_stack_error(); - // case hipErrorIllegalInstruction : throw illegal_instruction(); - // case hipErrorMisalignedAddress : throw misaligned_address(); - // case hipErrorInvalidAddressSpace : throw invalid_address_space(); - // case hipErrorInvalidPc : throw invalid_pc(); - case hipErrorLaunchFailure : throw launch_failed(); - // case hipErrorNotPermitted : throw not_permitted(); - case hipErrorNotSupported : throw not_supported(); - case hipErrorUnknown : throw unknown(); - default : throw unknown(); -} -} - -} -} - diff --git a/lib/driver/llvm.cc b/lib/driver/llvm.cc deleted file mode 100644 index a73e6541d985..000000000000 --- a/lib/driver/llvm.cc +++ /dev/null @@ -1,376 +0,0 @@ -/* Copyright 2015-2017 Philippe Tillet - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY - * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, - * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE - * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- */ -#include -#if __has_include() -#include -#endif -#include -#include -#include "triton/driver/llvm.h" -#include "triton/driver/dispatch.h" -#include "triton/driver/error.h" -#include "triton/tools/sha1.hpp" -#include "triton/tools/sys/getenv.hpp" -#include "triton/tools/sys/mkdir.hpp" -#include "triton/tools/sys/exec.hpp" -#include "llvm/IR/IRBuilder.h" -#include "llvm/IR/Verifier.h" -#include "llvm/IR/IRPrintingPasses.h" -#include "llvm/IR/Module.h" -#include "llvm/Support/CodeGen.h" -#include "llvm/Support/CommandLine.h" -#include "llvm/Support/SourceMgr.h" -#include "llvm/Support/raw_ostream.h" -#include "llvm/Support/TargetRegistry.h" -#include "llvm/Support/TargetSelect.h" -#include "llvm/Target/TargetMachine.h" -#include "llvm/Target/TargetOptions.h" -#include "llvm/IR/LegacyPassManager.h" -#include "llvm/ExecutionEngine/ExecutionEngine.h" -#include "llvm/ExecutionEngine/SectionMemoryManager.h" -#include "llvm/Transforms/Utils/Cloning.h" -#include "llvm/Transforms/Scalar.h" - -// begin AMD stuff -#include "llvm/Support/FileSystem.h" -#include "llvm/Support/FormattedStream.h" -#include "llvm/Support/Program.h" -#include "llvm/Support/ToolOutputFile.h" -#include "llvm/ADT/StringRef.h" -#include "llvm/Analysis/TargetLibraryInfo.h" -// end AMD stuff - -extern "C" -{ - int set_curterm(char *nterm) { return 0; } - int del_curterm(char *nterm) { return 0; } - int tigetnum(char *capname) { return 0; } - int setupterm(char *term, int fildes, int *errret) { return 0; } -} - -namespace triton -{ - namespace driver - { - - void init_llvm() - { - LLVMInitializeNVPTXTargetInfo(); - LLVMInitializeNVPTXTarget(); - LLVMInitializeNVPTXTargetMC(); - LLVMInitializeNVPTXAsmPrinter(); - LLVMInitializeAMDGPUTargetInfo(); - LLVMInitializeAMDGPUTarget(); - LLVMInitializeAMDGPUTargetMC(); - LLVMInitializeAMDGPUAsmPrinter(); - } - - /* ------------------------ */ - // CUDA // - /* ------------------------ */ - static bool find_and_replace(std::string &str, const std::string &begin, const std::string &end, const std::string &target) - { - size_t start_replace = str.find(begin); - size_t end_replace = str.find(end, start_replace); - if (start_replace == std::string::npos) - return false; - str.replace(start_replace, end_replace + 1 - start_replace, target); - return true; - } - - std::string path_to_ptxas(int &version) - { - std::vector rets; - std::string ret; - // search paths for ptxas - std::vector ptxas_prefixes = {"", "/usr/local/cuda/bin/"}; - std::string triton_ptxas = tools::getenv("TRITON_PTXAS_PATH"); - if (!triton_ptxas.empty()) - ptxas_prefixes.insert(ptxas_prefixes.begin(), triton_ptxas); - // see what path for ptxas are valid - std::vector working_ptxas; - for (std::string prefix : ptxas_prefixes) - { - std::string ptxas = prefix + "ptxas"; - bool works = tools::exec(ptxas + " --version 2>&1", ret) == 0; - if (works) - { - working_ptxas.push_back(ptxas); - rets.push_back(ret); - } - } - // error if no working ptxas was found - if (working_ptxas.empty()) - throw std::runtime_error("`ptxas` was searched in TRITON_PTXAS_PATH, /usr/local/cuda/bin/ or PATH" - " but a working version could not be found."); - std::string ptxas = working_ptxas.front(); - // parse version - std::regex version_regex("release (\\d+)\\.(\\d+)"); - std::smatch match; - bool found = false; - // currently choosing the first ptxas. 
Other logics can be implemented in future - for (std::string ret : rets) - { - if (std::regex_search(ret, match, version_regex)) - { - int major = std::stoi(match[1]); - int minor = std::stoi(match[2]); - version = major * 1000 + minor * 10; - found = true; - break; - } - } - if (not found) - { - throw std::runtime_error("Error in parsing version"); - } - return ptxas; - } - - int vptx(int version) - { - if (version >= 11040) - return 74; - // if(version >= 11030) return 73; - // if(version >= 11020) return 72; - // if(version >= 11010) return 71; - // if(version >= 11000) return 70; - // if(version >= 10020) return 65; - // if(version >= 10010) return 64; - // if(version >= 10000) return 63; - throw std::runtime_error("Triton requires CUDA 11.4+"); - } - - std::string llir_to_ptx(llvm::Module *module, int cc, int version) - { - // LLVM version in use may not officially support target hardware - int max_nvvm_cc = 75; - int max_nvvm_ptx = 74; - // options - auto options = llvm::cl::getRegisteredOptions(); - auto *short_ptr = static_cast *>(options["nvptx-short-ptr"]); - assert(short_ptr); - short_ptr->setValue(true); - // compute capability - std::string sm = "sm_" + std::to_string(cc); - // max PTX version - int ptx = vptx(version); - int ptx_major = ptx / 10; - int ptx_minor = ptx % 10; - // create - llvm::SmallVector buffer; - std::string triple = "nvptx64-nvidia-cuda"; - std::string proc = "sm_" + std::to_string(std::min(cc, max_nvvm_cc)); - std::string layout = ""; - std::string features = ""; - // std::string features = "+ptx" + std::to_string(std::min(ptx, max_nvvm_ptx)); - init_llvm(); - // verify and store llvm - llvm::legacy::PassManager pm; - // pm.add(llvm::createPrintModulePass(llvm::outs())); - pm.add(llvm::createVerifierPass()); - pm.run(*module); - // module->print(llvm::outs(), nullptr); - - // create machine - module->setTargetTriple(triple); - std::string error; - llvm::TargetMachine *machine; - auto target = llvm::TargetRegistry::lookupTarget(module->getTargetTriple(), error); - llvm::TargetOptions opt; - opt.AllowFPOpFusion = llvm::FPOpFusion::Fast; - opt.UnsafeFPMath = false; - opt.NoInfsFPMath = false; - opt.NoNaNsFPMath = true; - machine = target->createTargetMachine(module->getTargetTriple(), proc, features, opt, - llvm::Reloc::PIC_, llvm::None, llvm::CodeGenOpt::Aggressive); - // set data layout - if (layout.empty()) - module->setDataLayout(machine->createDataLayout()); - else - module->setDataLayout(layout); - // emit machine code - for (llvm::Function &f : module->functions()) - f.addFnAttr(llvm::Attribute::AlwaysInline); - llvm::legacy::PassManager pass; - llvm::raw_svector_ostream stream(buffer); - // emit - machine->addPassesToEmitFile(pass, stream, nullptr, llvm::CodeGenFileType::CGFT_AssemblyFile); - pass.run(*module); - - // post-process - std::string result(buffer.begin(), buffer.end()); - find_and_replace(result, ".version", "\n", ".version " + std::to_string(ptx_major) + "." 
+ std::to_string(ptx_minor) + "\n"); - find_and_replace(result, ".target", "\n", ".target " + sm + "\n"); - while (find_and_replace(result, "\t// begin inline asm", "\n", "")) - ; - while (find_and_replace(result, "\t// end inline asm", "\n", "")) - ; - return result; - } - - std::string ptx_to_cubin(const std::string &ptx, const std::string &ptxas, int cc) - { - // compile ptx with ptxas - char _fsrc[L_tmpnam]; - char _flog[L_tmpnam]; - std::tmpnam(_fsrc); - std::tmpnam(_flog); - std::string fsrc = _fsrc; - std::string flog = _flog; - std::string fbin = fsrc + ".o"; - const char *_fbin = fbin.c_str(); - std::ofstream ofs(fsrc); - ofs << ptx << std::endl; - ofs.close(); - std::string cmd; - int err; - cmd = ptxas + " -v --gpu-name=sm_" + std::to_string(cc) + " " + fsrc + " -o " + fsrc + ".o 2> " + flog; - err = system(cmd.c_str()); - if (err != 0) - { - std::ifstream _log(_flog); - std::string log(std::istreambuf_iterator(_log), {}); - unlink(_fsrc); - unlink(_flog); - throw std::runtime_error("Internal Triton PTX codegen error: \n" + log); - } - std::ifstream _cubin(_fbin, std::ios::binary); - std::string cubin(std::istreambuf_iterator(_cubin), {}); - _cubin.close(); - unlink(_fsrc); - unlink(_flog); - unlink(_fbin); - return cubin; - } - - /* ------------------------ */ - // HIP // - /* ------------------------ */ - - std::string llir_to_amdgpu(llvm::Module *module, const std::string &_proc) - { - init_llvm(); - - // proc = std::get<0>(GetFeatureStrFromGCNArchName(rocminfo)); - // features = std::get<1>(GetFeatureStrFromGCNArchName(rocminfo)); - - // create - llvm::SmallVector buffer; - std::string triple = "amdgcn-amd-amdhsa"; - std::string layout = ""; - std::string features; - std::string proc = "gfx908"; - // verify and store llvm - llvm::legacy::PassManager pm; - pm.add(llvm::createVerifierPass()); - pm.run(*module); - // create machine - module->setTargetTriple(triple); - std::string error; - auto target = llvm::TargetRegistry::lookupTarget(module->getTargetTriple(), error); - llvm::TargetOptions opt; - opt.AllowFPOpFusion = llvm::FPOpFusion::Fast; - opt.UnsafeFPMath = false; - opt.NoInfsFPMath = false; - opt.NoNaNsFPMath = true; - llvm::TargetMachine *machine = target->createTargetMachine(module->getTargetTriple(), proc, features, opt, - llvm::Reloc::PIC_, llvm::None, - llvm::CodeGenOpt::Aggressive); - // set data layout - if (layout.empty()) - module->setDataLayout(machine->createDataLayout()); - else - module->setDataLayout(layout); - // emit machine code - for (llvm::Function &f : module->functions()) - f.addFnAttr(llvm::Attribute::AlwaysInline); - llvm::legacy::PassManager pass; - llvm::raw_svector_ostream stream(buffer); - - // create dump files - std::string module_name = module->getModuleIdentifier(); - std::error_code ec; - - // Save GCN ISA binary. - std::string isabin_path = std::string("/tmp/") + module_name + std::string(".o"); - std::unique_ptr isabin_fs( - new llvm::raw_fd_ostream(isabin_path, ec, llvm::sys::fs::OF_Text)); - if (ec) - { - std::cout << isabin_path << " was not created. error code: " << ec << std::endl; - } - - // emit - machine->addPassesToEmitFile(pass, *isabin_fs, nullptr, llvm::CGFT_ObjectFile); - pass.run(*module); - // Save GCN ISA. 
- std::string amdgcn_path = std::string("/tmp/") + module_name + std::string(".gcn"); - std::string result(buffer.begin(), buffer.end()); - std::ofstream amdgcn(amdgcn_path); - amdgcn << result; - amdgcn.close(); - - // generate HASCO file - std::string hsaco_path = std::string("/tmp/") + module_name + std::string(".hsaco"); - std::string error_message; - int lld_result = - llvm::sys::ExecuteAndWait("/opt/rocm/llvm/bin/ld.lld", - {"/opt/rocm/llvm/bin/ld.lld", "-flavor", "gnu", "-shared", "-o", hsaco_path, isabin_path}, - llvm::None, {}, 0, 0, &error_message); - if (lld_result) - { - std::cout << "ld.lld execute fail: " << std::endl; - std::cout << error_message << std::endl; - std::cout << lld_result << std::endl; - } - - return hsaco_path; - } - - hipModule_t amdgpu_to_hipmodule(const std::string &path) - { - // Read HSACO. - std::ifstream hsaco_file(path, std::ios::binary | std::ios::ate); - std::ifstream::pos_type hsaco_file_size = hsaco_file.tellg(); - - std::vector hsaco(hsaco_file_size); - hsaco_file.seekg(0, std::ios::beg); - hsaco_file.read(reinterpret_cast(&hsaco[0]), hsaco_file_size); - hsaco_file.close(); - hipJitOption opt[] = {hipJitOptionErrorLogBufferSizeBytes, hipJitOptionErrorLogBuffer, - hipJitOptionInfoLogBufferSizeBytes, hipJitOptionInfoLogBuffer, - hipJitOptionLogVerbose}; - const unsigned int errbufsize = 8192; - const unsigned int logbufsize = 8192; - char _err[errbufsize]; - char _log[logbufsize]; - void *optval[] = {(void *)(uintptr_t)errbufsize, - (void *)_err, (void *)(uintptr_t)logbufsize, - (void *)_log, (void *)1}; - hipModule_t ret; - dispatch::hipModuleLoadDataEx(&ret, hsaco.data(), 5, opt, optval); - return ret; - } - - } // namespace driver -} // namespace triton diff --git a/lib/ir/basic_block.cc b/lib/ir/basic_block.cc deleted file mode 100644 index 0bbc3af0fd78..000000000000 --- a/lib/ir/basic_block.cc +++ /dev/null @@ -1,91 +0,0 @@ -#include -#include -#include "triton/ir/basic_block.h" -#include "triton/ir/instructions.h" -#include "triton/ir/type.h" -#include "triton/ir/function.h" - -namespace triton { -namespace ir { - -class phi_node; - - -basic_block::basic_block(context &ctx, const std::string &name, function *parent, basic_block* next): - value(type::get_label_ty(ctx), name), ctx_(ctx), parent_(parent) { - if(parent_) - parent_->insert_block(this, next); -} - -basic_block* basic_block::create(context &ctx, const std::string &name, function *parent, basic_block* next){ - return new basic_block(ctx, name, parent, next); -} - -void basic_block::replace_phi_uses_with(basic_block* before, basic_block* after) { - for(ir::instruction* i: inst_list_){ - auto* curr_phi = dynamic_cast(i); - if(!curr_phi) - break; - // curr_phi->replace_uses_of_with(before, after); - for (size_t idx = 0; idx < curr_phi->get_num_incoming(); ++idx) - if (curr_phi->get_incoming_block(idx) == before) - curr_phi->set_incoming_block(idx, after); - } -} - -void basic_block::append_instruction(ir::instruction* i){ - i->set_parent(this); - inst_list_.push_back(i); -} - -basic_block* basic_block::split_before(ir::instruction* loc, const std::string& name) { - basic_block* ret = basic_block::create(ctx_, name, parent_, this); - ret->set_name(get_name()); - set_name("after_" + name); - - // splice instruction list - auto loc_it = std::find(inst_list_.begin(), inst_list_.end(), loc); - ret->get_inst_list().splice(ret->get_inst_list().begin(), inst_list_, inst_list_.begin(), loc_it); - for(ir::instruction* i: ret->get_inst_list()) - i->set_parent(ret); - // the predecessors of `this` 
becomes the predecessors of `ret` - for(ir::basic_block* pred: get_predecessors()){ - auto* term = dynamic_cast(pred->get_inst_list().back()); - assert(term); - term->replace_uses_of_with(this, ret); - replace_phi_uses_with(pred, ret); - } - ir::branch_inst* br = branch_inst::create(this); - ret->append_instruction(br); - return ret; -} - -std::vector basic_block::get_predecessors() const { - std::vector ret; - for(ir::user* u: users_) - if(auto term = dynamic_cast(u)) - ret.push_back(term->get_parent()); - return ret; -} - -std::vector basic_block::get_successors() const { - std::vector ret; - for(ir::instruction* i: inst_list_) - for(ir::value* v: i->ops()) - if(auto block = dynamic_cast(v)) - ret.push_back(block); - return ret; -} - -basic_block::iterator basic_block::get_first_non_phi(){ - auto it = begin(); - for(; it != end(); it++) - if(!dynamic_cast(*it)){ - return it; - } - return it; -} - -} - -} diff --git a/lib/ir/builder.cc b/lib/ir/builder.cc deleted file mode 100644 index 7f7dfdc982b9..000000000000 --- a/lib/ir/builder.cc +++ /dev/null @@ -1,491 +0,0 @@ -#include -#include -#include -#include "triton/ir/basic_block.h" -#include "triton/ir/builder.h" -#include "triton/ir/constant.h" -#include "triton/ir/instructions.h" -#include "triton/ir/type.h" - -namespace triton{ -namespace ir{ - -builder::builder(context &ctx): - ctx_(ctx), block_(nullptr) {} - -//===----------------------------------------------------------------------===// -// utilities -//===----------------------------------------------------------------------===// -void builder::set_insert_point(basic_block::iterator it){ - block_ = (*it)->get_parent(); - insert_point_ = it; -} - -void builder::set_insert_point(instruction* i){ - block_ = i->get_parent(); - auto it = std::find(block_->begin(), block_->end(), i); - set_insert_point(it); -} - - -void builder::set_insert_point_after(instruction* i){ - block_ = i->get_parent(); - auto it = std::find(block_->begin(), block_->end(), i); - set_insert_point(++it); -} - - -void builder::set_insert_point(basic_block *block){ - block_ = block; - insert_point_ = block->end(); -} - - -//===----------------------------------------------------------------------===// -// convenience functions -//===----------------------------------------------------------------------===// - -value *builder::get_int1(bool val) -{ return constant_int::get(type::get_int1_ty(ctx_), val); } - -value *builder::get_int32(uint32_t val) -{ return constant_int::get(type::get_int32_ty(ctx_), val);} - -value *builder::get_int64(uint64_t val) -{ return constant_int::get(type::get_int64_ty(ctx_), val);} - -value *builder::get_float16(float val) -{ return constant_fp::get(type::get_fp16_ty(ctx_), val); } - -value *builder::get_float32(float val) -{ return constant_fp::get(type::get_fp32_ty(ctx_), val); } - -value *builder::get_range(int32_t _lo, int32_t _hi) { - constant_int* lo = static_cast(get_int32(_lo)); - constant_int* hi = static_cast(get_int32(_hi)); - return insert(make_range::create(lo, hi)); -} - -type *builder::get_void_ty() -{ return type::get_void_ty(ctx_); } - -type *builder::get_int1_ty() -{ return type::get_int1_ty(ctx_); } - -type *builder::get_int8_ty() -{ return type::get_int8_ty(ctx_); } - -type *builder::get_int16_ty() -{ return type::get_int16_ty(ctx_); } - -type *builder::get_int32_ty() -{ return type::get_int32_ty(ctx_); } - -type *builder::get_int64_ty() -{ return type::get_int64_ty(ctx_); } - -type *builder::get_fp8_ty() -{ return type::get_fp8_ty(ctx_); } - -type 
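- // Minimal usage sketch of this builder API (hypothetical; `ctx` and `entry`
- // are assumed to be an existing ir::context and ir::basic_block):
- //
- //   ir::builder b(ctx);
- //   b.set_insert_point(entry);             // append to the end of `entry`
- //   ir::value *c   = b.get_int32(42);      // cached constant, nothing inserted
- //   ir::value *rng = b.get_range(0, 128);  // make_range instruction, inserted at the current point
- //
- // Constants are materialized through the shared constant tables, while the
- // create_* helpers insert a new instruction at the current insertion point.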
*builder::get_half_ty() -{ return type::get_fp16_ty(ctx_); } - -type *builder::get_bf16_ty() -{ return type::get_bf16_ty(ctx_); } - -type *builder::get_float_ty() -{ return type::get_fp32_ty(ctx_); } - -type *builder::get_double_ty() -{ return type::get_fp64_ty(ctx_); } - - -//===----------------------------------------------------------------------===// -// terminator instructions -//===----------------------------------------------------------------------===// - -value* builder::create_br(basic_block *dest){ - return insert(branch_inst::create(dest)); -} - -value* builder::create_cond_br(value *cond, basic_block *if_dest, basic_block *else_dest){ - return insert(branch_inst::create(cond, if_dest, else_dest)); -} - -value *builder::create_ret_void() { - return insert(return_inst::create(ctx_)); -} - -value *builder::create_ret(value* val) { - return insert(return_inst::create(ctx_, val)); -} - -//===----------------------------------------------------------------------===// -// dequantize instructions -//===----------------------------------------------------------------------===// - -value* builder::create_dequantize(value *src, value *scale, value *shift, type *dst_ty){ - return insert(dequantize_inst::create(src, scale, shift, dst_ty)); -} - -//===----------------------------------------------------------------------===// -// cast instructions -//===----------------------------------------------------------------------===// -#define DEFINE_CAST_INSTR(SUFFIX, OPCODE)\ - value *builder::create_ ## SUFFIX(value *src, type *dst_ty){\ - return create_cast(OPCODE, src, dst_ty);\ - } - -DEFINE_CAST_INSTR(bitcast, cast_op_t::BitCast) -DEFINE_CAST_INSTR(int_to_ptr, cast_op_t::IntToPtr) -DEFINE_CAST_INSTR(ptr_to_int, cast_op_t::PtrToInt) -DEFINE_CAST_INSTR(si_to_fp, cast_op_t::SIToFP) -DEFINE_CAST_INSTR(ui_to_fp, cast_op_t::UIToFP) -DEFINE_CAST_INSTR(fp_to_si, cast_op_t::FPToSI) -DEFINE_CAST_INSTR(fp_to_ui, cast_op_t::FPToUI) -DEFINE_CAST_INSTR(fp_ext, cast_op_t::FPExt) -DEFINE_CAST_INSTR(fp_trunc, cast_op_t::FPTrunc) - -value* builder::create_cast(cast_op_t op, value *v, type *dst_ty){ - return insert(cast_inst::create(op, v, dst_ty)); -} - -value* builder::create_int_cast(value *src, type *dst_ty, bool is_signed){ - return insert(cast_inst::create_integer_cast(src, dst_ty, is_signed)); -} - -//===----------------------------------------------------------------------===// -// phi instructions -//===----------------------------------------------------------------------===// - -phi_node* builder::create_phi(type *ty, unsigned num_reserved){ - return insert(phi_node::create(ty, num_reserved)); -} - -//===----------------------------------------------------------------------===// -// call instructions -//===----------------------------------------------------------------------===// - -value *builder::create_call(function* fn, const std::vector& args){ - return insert(call_inst::create(fn, args)); -} - -value* builder::create_launch(function* fn, const std::vector& args, const std::vector& grid, value* num_warps){ - return insert(launch_inst::create(fn, args, grid, num_warps)); - -} - -//===----------------------------------------------------------------------===// -// binary float instructions -//===----------------------------------------------------------------------===// - -#define DEFINE_BINARY_FLOAT(SUFFIX, OPCODE)\ - value *builder::create_ ## SUFFIX(value *lhs, value *rhs){\ - return insert(binary_operator::create(OPCODE, lhs, rhs));\ - } - -// Binary -DEFINE_BINARY_FLOAT(fmul, 
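- // For reference, each DEFINE_BINARY_FLOAT(SUFFIX, OPCODE) invocation is a thin
- // wrapper; e.g. the fadd entry expands to roughly:
- //
- //   value *builder::create_fadd(value *lhs, value *rhs) {
- //     return insert(binary_operator::create(binary_op_t::FAdd, lhs, rhs));
- //   }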
binary_op_t::FMul) -DEFINE_BINARY_FLOAT(fdiv, binary_op_t::FDiv) -DEFINE_BINARY_FLOAT(frem, binary_op_t::FRem) -DEFINE_BINARY_FLOAT(fadd, binary_op_t::FAdd) -DEFINE_BINARY_FLOAT(fsub, binary_op_t::FSub) - - -//===----------------------------------------------------------------------===// -// binary int instructions -//===----------------------------------------------------------------------===// - - -value* builder::create_insert_nuwnswb_binop(binary_op_t op, value *lhs, - value *rhs, - bool has_nuw, bool has_nsw) { - binary_operator* result = insert(binary_operator::create(op, lhs, rhs)); - if (has_nuw) result->set_has_no_unsigned_wrap(); - if (has_nsw) result->set_has_no_signed_wrap(); - return result; -} - -#define DEFINE_NOWRAP_BINARY(SUFFIX, OPCODE)\ - value* builder::create_ ## SUFFIX(value *lhs, value *rhs, bool has_nuw, bool has_nsw){\ - return create_insert_nuwnswb_binop(OPCODE, lhs, rhs, has_nuw, has_nsw);\ - }\ - -#define DEFINE_BINARY_INT(SUFFIX, OPCODE)\ - value *builder::create_ ## SUFFIX(value *lhs, value *rhs){\ - return create_insert_nuwnswb_binop(OPCODE, lhs, rhs, false, false);\ - } - - - -// Binary -DEFINE_NOWRAP_BINARY(mul, binary_op_t::Mul) -DEFINE_NOWRAP_BINARY(add, binary_op_t::Add) -DEFINE_NOWRAP_BINARY(sub, binary_op_t::Sub) -DEFINE_NOWRAP_BINARY(shl, binary_op_t::Shl) -DEFINE_NOWRAP_BINARY(ashr, binary_op_t::AShr) -DEFINE_NOWRAP_BINARY(lshr, binary_op_t::LShr) -DEFINE_BINARY_INT(sdiv, binary_op_t::SDiv) -DEFINE_BINARY_INT(udiv, binary_op_t::UDiv) -DEFINE_BINARY_INT(srem, binary_op_t::SRem) -DEFINE_BINARY_INT(urem, binary_op_t::URem) -DEFINE_BINARY_INT(and, binary_op_t::And) -DEFINE_BINARY_INT(or, binary_op_t::Or) -DEFINE_BINARY_INT(xor, binary_op_t::Xor) - - -//===----------------------------------------------------------------------===// -// getelementptr instructions -//===----------------------------------------------------------------------===// - -value* builder::create_gep(value *ptr, const std::vector& idx_list){ - return insert(getelementptr_inst::create(ptr, idx_list)); -} - -//===----------------------------------------------------------------------===// -// icmp instructions -//===----------------------------------------------------------------------===// - -value *builder::create_icmp(cmp_pred_t pred, value *lhs, value *rhs){ - return insert(icmp_inst::create(pred, lhs, rhs)); -} - -#define DEFINE_ICMP_INSTR(SUFFIX, OPCODE)\ - value *builder::create_icmp ## SUFFIX(value *lhs, value *rhs){\ - return create_icmp(OPCODE, lhs, rhs);\ - } - -// Signed -DEFINE_ICMP_INSTR(SLE, cmp_pred_t::ICMP_SLE) -DEFINE_ICMP_INSTR(SLT, cmp_pred_t::ICMP_SLT) -DEFINE_ICMP_INSTR(SGE, cmp_pred_t::ICMP_SGE) -DEFINE_ICMP_INSTR(SGT, cmp_pred_t::ICMP_SGT) -// Unsigned -DEFINE_ICMP_INSTR(ULE, cmp_pred_t::ICMP_ULE) -DEFINE_ICMP_INSTR(ULT, cmp_pred_t::ICMP_ULT) -DEFINE_ICMP_INSTR(UGE, cmp_pred_t::ICMP_UGE) -DEFINE_ICMP_INSTR(UGT, cmp_pred_t::ICMP_UGT) -// General -DEFINE_ICMP_INSTR(EQ, cmp_pred_t::ICMP_EQ) -DEFINE_ICMP_INSTR(NE, cmp_pred_t::ICMP_NE) - - -//===----------------------------------------------------------------------===// -// fcmp instructions -//===----------------------------------------------------------------------===// - -value *builder::create_fcmp(cmp_pred_t pred, value *lhs, value *rhs){ - return insert(fcmp_inst::create(pred, lhs, rhs)); -} - -#define DEFINE_FCMP_INSTR(SUFFIX, OPCODE)\ - value *builder::create_fcmp ## SUFFIX(value *lhs, value *rhs){\ - return create_fcmp(OPCODE, lhs, rhs);\ - } - -// Ordered -DEFINE_FCMP_INSTR(OLE, cmp_pred_t::FCMP_OLE) 
-DEFINE_FCMP_INSTR(OLT, cmp_pred_t::FCMP_OLT) -DEFINE_FCMP_INSTR(OGE, cmp_pred_t::FCMP_OGE) -DEFINE_FCMP_INSTR(OGT, cmp_pred_t::FCMP_OGT) -DEFINE_FCMP_INSTR(OEQ, cmp_pred_t::FCMP_OEQ) -DEFINE_FCMP_INSTR(ONE, cmp_pred_t::FCMP_ONE) - -DEFINE_FCMP_INSTR(ULE, cmp_pred_t::FCMP_ULE) -DEFINE_FCMP_INSTR(ULT, cmp_pred_t::FCMP_ULT) -DEFINE_FCMP_INSTR(UGE, cmp_pred_t::FCMP_UGE) -DEFINE_FCMP_INSTR(UGT, cmp_pred_t::FCMP_UGT) -DEFINE_FCMP_INSTR(UEQ, cmp_pred_t::FCMP_UEQ) -DEFINE_FCMP_INSTR(UNE, cmp_pred_t::FCMP_UNE) - - -//===----------------------------------------------------------------------===// -// load/store instructions -//===----------------------------------------------------------------------===// - -value *builder::create_load(value *ptr, load_inst::CACHE_MODIFIER cache, load_inst::EVICTION_POLICY eviction, bool is_volatile){ - return insert(unmasked_load_inst::create(ptr, cache, eviction, is_volatile)); -} - -value *builder::create_store(value *ptr, value *val, store_inst::EVICTION_POLICY eviction){ - return insert(unmasked_store_inst::create(ptr, val, eviction)); -} - -value *builder::create_masked_load(value *ptr, value *mask, value *false_value, load_inst::CACHE_MODIFIER cache, load_inst::EVICTION_POLICY eviction, bool is_volatile){ - return insert(masked_load_inst::create(ptr, mask, false_value, cache, eviction, is_volatile)); -} - -value *builder::create_masked_store(value *ptr, value *val, value *mask, store_inst::EVICTION_POLICY eviction){ - return insert(masked_store_inst::create(ptr, val, mask, eviction)); -} - -//===----------------------------------------------------------------------===// -// struct instructions -//===----------------------------------------------------------------------===// - - -// Struct instructions -value *builder::create_insert_value(value* val, value *elt, size_t idx){ - return insert(insert_value_inst::create(val, elt, idx)); -} - -value *builder::create_extract_value(value* val, size_t idx) { - return insert(extract_value_inst::create(val, idx)); -} -//===----------------------------------------------------------------------===// -// block instructions -//===----------------------------------------------------------------------===// - -value *builder::create_reshape(value *arg, const type::block_shapes_t &shapes) { - return insert(reshape_inst::create(arg, shapes)); -} - -value *builder::create_cat(value *lhs, value *rhs) { - return insert(cat_inst::create(lhs, rhs)); -} - -value *builder::create_splat(value *arg, const type::block_shapes_t &shapes) { - return insert(splat_inst::create(arg, shapes)); -} - -value *builder::create_broadcast(value *arg, const type::block_shapes_t &shapes) { - return insert(broadcast_inst::create(arg, shapes)); -} - -value *builder::create_downcast(value *arg) { - return insert(downcast_inst::create(arg)); -} - -// - -value *builder::create_atomic_rmw(ir::atomic_rmw_op_t op, value *ptr, value *val, value *msk){ - return insert(atomic_rmw_inst::create(op, ptr, val, msk)); -} - -#define DEFINE_ATOMIC_RMW_INSTR(SUFFIX, OPCODE)\ - value *builder::create_ ## SUFFIX(value *ptr, value *val, value *mask){\ - return create_atomic_rmw(OPCODE, ptr, val, mask);\ - } - -DEFINE_ATOMIC_RMW_INSTR(atomic_max, ir::atomic_rmw_op_t::Max) -DEFINE_ATOMIC_RMW_INSTR(atomic_umax, ir::atomic_rmw_op_t::UMax) -DEFINE_ATOMIC_RMW_INSTR(atomic_min, ir::atomic_rmw_op_t::Min) -DEFINE_ATOMIC_RMW_INSTR(atomic_umin, ir::atomic_rmw_op_t::UMin) -DEFINE_ATOMIC_RMW_INSTR(atomic_fadd, ir::atomic_rmw_op_t::FAdd) -DEFINE_ATOMIC_RMW_INSTR(atomic_add, 
ir::atomic_rmw_op_t::Add) -DEFINE_ATOMIC_RMW_INSTR(atomic_and, ir::atomic_rmw_op_t::And) -DEFINE_ATOMIC_RMW_INSTR(atomic_or, ir::atomic_rmw_op_t::Or) -DEFINE_ATOMIC_RMW_INSTR(atomic_xor, ir::atomic_rmw_op_t::Xor) -DEFINE_ATOMIC_RMW_INSTR(atomic_xchg, ir::atomic_rmw_op_t::Xchg) - -// Utilities -value *builder::create_clock() { - return insert(clock_inst::create(ctx_)); -} - -value *builder::create_globaltimer() { - return insert(globaltimer_inst::create(ctx_)); -} - -//===----------------------------------------------------------------------===// -// externs -//===----------------------------------------------------------------------===// - -value *builder::create_extern_elementwise(const std::string &lib_name, - const std::string &lib_path, - const std::string &symbol_name, - const std::vector &args, - type *ret_ty) { - return insert(extern_elementwise_inst::create(ctx_, args, ret_ty, lib_name, - lib_path, symbol_name)); -} - -//===----------------------------------------------------------------------===// -// built-in instructions -//===----------------------------------------------------------------------===// - -value *builder::create_get_program_id(unsigned axis) { - return insert(get_program_id_inst::create(ctx_, axis)); -} - -value *builder::create_get_num_programs(unsigned axis) { - return insert(get_num_programs_inst::create(ctx_, axis)); -} - -value *builder::create_atomic_cas(value *ptr, value *cmp, value *val){ - return insert(atomic_cas_inst::create(ptr, cmp, val)); -} - - -value *builder::create_exp(value *arg){ - return insert(exp_inst::create(arg)); -} - -value *builder::create_cos(value *arg){ - return insert(cos_inst::create(arg)); -} - -value *builder::create_sin(value *arg){ - return insert(sin_inst::create(arg)); -} - -value *builder::create_log(value *arg){ - return insert(log_inst::create(arg)); -} - -value *builder::create_dot(value *A, value *B, value *C, bool trans_a, bool trans_b, bool allow_tf32) { - return insert(dot_inst::create(A, B, C, trans_a, trans_b, allow_tf32)); -} - -value *builder::create_trans(value *A, const std::vector& perm) { - return insert(trans_inst::create(A, perm)); -} - -value *builder::create_sqrt(value *A) { - return insert(sqrt_inst::create(A)); -} - -value *builder::create_reduce(value *A, reduce_inst::op_t op, unsigned axis) { - return insert(reduce_inst::create(A, op, axis)); -} - -value *builder::create_select(value *pred, value *if_value, value *else_value){ - return insert(select_inst::create(pred, if_value, else_value)); -} - -//===----------------------------------------------------------------------===// -// intrinsic instructions -//===----------------------------------------------------------------------===// - -value *builder::create_umulhi(value *lhs, value *rhs) { - return insert(umulhi_inst::create(lhs, rhs)); -} - -value *builder::create_copy_to_shared(value *arg) { - return insert(copy_to_shared_inst::create(arg)); -} - - -value *builder::create_copy_from_shared(value *arg) { - return insert(copy_from_shared_inst::create(arg)); -} - -value *builder::create_masked_load_async(value *ptr, value *mask, value *false_value, load_inst::CACHE_MODIFIER cache, load_inst::EVICTION_POLICY eviction) { - return insert(masked_load_async_inst::create(ptr, mask, false_value, cache, eviction)); -} - -value *builder::create_barrier(const std::string &name) { - return insert(barrier_inst::create(ctx_)); -} - -value *builder::create_async_wait(int N) { - return insert(async_wait_inst::create(ctx_, N)); -} - -value 
*builder::create_prefetch_s(value *arg, int inc) { - return insert(prefetch_s_inst::create(ctx_, arg, inc)); -} - - -} -} diff --git a/lib/ir/constant.cc b/lib/ir/constant.cc deleted file mode 100644 index 417626c920f6..000000000000 --- a/lib/ir/constant.cc +++ /dev/null @@ -1,120 +0,0 @@ -#include -#include -#include "triton/ir/constant.h" -#include "triton/ir/type.h" -#include "triton/ir/context.h" -#include "triton/ir/context_impl.h" - -namespace triton{ -namespace ir{ - - -// constant - -constant *constant::get_null_value(type *ty) { - context &ctx = ty->get_context(); - switch (ty->get_scalar_ty()->get_type_id()) { - case type::IntegerTyID: - return constant_int::get(ty, 0); - case type::FP16TyID: - return constant_fp::get(type::get_fp16_ty(ctx), 0); - case type::BF16TyID: - return constant_fp::get(type::get_bf16_ty(ctx), 0); - case type::FP32TyID: - return constant_fp::get(type::get_fp32_ty(ctx), 0); - case type::FP64TyID: - return constant_fp::get(type::get_fp64_ty(ctx), 0); - default: - throw std::runtime_error("Cannot create a null constant of that type!"); - } -} - -// FIXME - -constant *constant::get_all_ones_value(type *ty) { - if(ty->is_integer_ty()) - return constant_int::get(ty, 0xFFFFFFFFFFFFFFFF); - if(ty->is_floating_point_ty()) - return constant_fp::get(ty, 0xFFFFFFFFFFFFFFFF); - throw std::runtime_error("Cannot create all ones value for that type!"); -} - -// constant_int -// FIXME use something like APInt - -constant_int::constant_int(type *ty, uint64_t value) - : constant(ty, 0), value_(value){ } - -constant_int *constant_int::get(type *ty, uint64_t value) { - if (!ty->is_integer_ty()) - throw std::runtime_error("Cannot create constant_int with non integer ty"); - context_impl *impl = ty->get_context().p_impl.get(); - std::unique_ptr &cst = impl->int_constants_[std::make_pair(ty, value)]; - if(!cst) - cst.reset(new constant_int(ty, value)); - return cst.get(); -} - - -// constant_fp -// FIXME use something like APFloat - -constant_fp::constant_fp(type *ty, double value) - : constant(ty, 0), value_(value){ } - -constant *constant_fp::get_negative_zero(type *ty){ - double neg_zero = 0; - return get(ty, neg_zero); -} - -constant *constant_fp::get_zero_value_for_negation(type *ty) { - if(ty->get_scalar_ty()->is_floating_point_ty()) - return constant_fp::get(ty, 0); - return constant::get_null_value(ty); -} - -constant *constant_fp::get(type *ty, double v){ - context_impl *impl = ty->get_context().p_impl.get(); - std::unique_ptr &result = impl->fp_constants_[std::make_pair(ty, v)]; - if(!result) - result.reset(new constant_fp(ty, v)); - return result.get(); -} - - -// undef value -undef_value::undef_value(type *ty) - : constant(ty, 0) { } - -undef_value *undef_value::get(type *ty) { - context_impl *impl = ty->get_context().p_impl.get(); - std::unique_ptr &result = impl->uv_constants_[ty]; - if(!result) - result.reset(new undef_value(ty)); - return result.get(); -} - -/* global value */ -global_value::global_value(type *ty, unsigned num_ops, - linkage_types_t linkage, - const std::string &name, unsigned addr_space) - : constant(pointer_type::get(ty, addr_space), num_ops, name), - linkage_(linkage) { } - - -/* global object */ -global_object::global_object(type *ty, unsigned num_ops, - linkage_types_t linkage, - const std::string &name, unsigned addr_space) - : global_value(ty, num_ops, linkage, name, addr_space) { } - - -/* alloc const */ -alloc_const::alloc_const(type *ty, constant_int *size, const std::string &name) - : global_object(ty, 1, global_value::external, name, 
4) { - set_operand(0, size); -} - - -} -} diff --git a/lib/ir/context.cc b/lib/ir/context.cc deleted file mode 100644 index 0fc65ddc2d8c..000000000000 --- a/lib/ir/context.cc +++ /dev/null @@ -1,40 +0,0 @@ -#include "triton/ir/context_impl.h" -#include "triton/ir/context.h" -#include "triton/ir/type.h" - -namespace triton{ -namespace ir{ - -//===----------------------------------------------------------------------===// -// context implementation -//===----------------------------------------------------------------------===// - -context_impl::context_impl(context &ctx) - : void_ty(ctx, type::VoidTyID), - label_ty(ctx, type::LabelTyID), - // floating point - fp8_ty(ctx, type::FP8TyID), - fp16_ty(ctx, type::FP16TyID), - bf16_ty(ctx, type::BF16TyID), - fp32_ty(ctx, type::FP32TyID), - fp64_ty(ctx, type::FP64TyID), - // integers - int1_ty(ctx, 1), - int8_ty(ctx, 8), - int16_ty(ctx, 16), - int32_ty(ctx, 32), - int64_ty(ctx, 64), - int128_ty(ctx, 128) {} - -//===----------------------------------------------------------------------===// -// context -//===----------------------------------------------------------------------===// - -context::context(): - p_impl(std::make_shared(*this)) { - -} - - -} -} diff --git a/lib/ir/function.cc b/lib/ir/function.cc deleted file mode 100644 index 4f3cd5ac6723..000000000000 --- a/lib/ir/function.cc +++ /dev/null @@ -1,66 +0,0 @@ -#include -#include "triton/ir/function.h" -#include "triton/ir/type.h" -#include "triton/ir/module.h" - -namespace triton{ -namespace ir{ - - -/* Argument */ - -argument::argument(type *ty, const std::string &name, function *parent, unsigned arg_no) - : value(ty, name), parent_(parent), arg_no_(arg_no) { } - -argument *argument::create(type *ty, const std::string &name, - function *parent, unsigned arg_no) { - return new argument(ty, name, parent, arg_no); -} - -function* argument::get_parent() const { - return parent_; -} - -unsigned argument::get_arg_no() const { - return arg_no_; -} - -void argument::accept(visitor *v) { - v->visit_argument(this); -} - - -/* function */ -function::function(function_type *ty, linkage_types_t linkage, - const std::string &name, module *parent) - : global_object(ty, 0, linkage, name), parent_(parent), fn_ty_(ty), is_kernel_(false) { - unsigned num_params = fn_ty_->get_num_params(); - if(parent) - parent->push_function(this); - // skip if no parameter - if(num_params == 0) - return; - // create arguments - args_.resize(num_params); - for(unsigned i = 0; i < num_params; i++){ - type *param_ty = fn_ty_->get_param_ty(i); - args_[i] = argument::create(param_ty, "", this, i); - } -} - -/* basic block */ -void function::insert_block(basic_block *block, basic_block *next) { - auto it = std::find(blocks_.begin(), blocks_.end(), next); - blocks_.insert(it, block); -} - - -function *function::create(function_type *ty, linkage_types_t linkage, - const std::string &name, module *mod) { - return new function(ty, linkage, name, mod); -} - - -} -} - diff --git a/lib/ir/instructions.cc b/lib/ir/instructions.cc deleted file mode 100644 index 92a466f8ffe5..000000000000 --- a/lib/ir/instructions.cc +++ /dev/null @@ -1,1059 +0,0 @@ -#include -#include -#include "triton/ir/context.h" -#include "triton/ir/basic_block.h" -#include "triton/ir/instructions.h" -#include "triton/ir/constant.h" -#include "triton/ir/type.h" -#include "triton/ir/function.h" - -namespace triton{ -namespace ir{ - -//===----------------------------------------------------------------------===// -// instruction classes 
-//===----------------------------------------------------------------------===// - -instruction::instruction(type *ty, value_id_t ity, unsigned num_ops, - const std::string &name, instruction *next) - : user(ty, num_ops, name), id_(ity) { - if(next){ - basic_block *block = next->get_parent(); - assert(block && "Next instruction is not in a basic block!"); - auto it = std::find(block->begin(), block->end(), next); - block->get_inst_list().insert(it, next); - } -} - -void instruction::erase_from_parent() { - parent_->erase(this); - for(ir::value* op: ops()) - op->erase_use(this); -} - -bool instruction::has_tile_result_or_op() { - bool result = get_type()->is_block_ty(); - for(unsigned i = 0; i < get_num_operands(); i++) - result |= get_operand(i)->get_type()->is_block_ty(); - return result; -} - -//===----------------------------------------------------------------------===// -// phi_node classes -//===----------------------------------------------------------------------===// - -phi_node::phi_node(type *ty, unsigned num_reserved, std::string const &name, instruction *next) - : instruction(ty, INST_PHI, 0, name, next) { - blocks_.reserve(num_reserved); -} - -value* phi_node::get_value_for_block(basic_block * block) { - auto it = std::find(blocks_.begin(), blocks_.end(), block); - size_t n = std::distance(blocks_.begin(), it); - return get_incoming_value(n); -} - -// Set incoming value -void phi_node::set_incoming_value(unsigned i, value *v){ - assert(v && "PHI node got a null value!"); - assert(get_type() == v->get_type() && - "All operands to PHI node must be the same type as the PHI node!"); - set_operand(i, v); -} - -// Set incoming block -void phi_node::set_incoming_block(unsigned i, basic_block *block){ - assert(block && "PHI node got a null basic block!"); - blocks_[i] = block; -} - -// Add incoming -void phi_node::add_incoming(value *v, basic_block *block){ - assert(v && "PHI node got a null value!!"); - resize_ops(get_num_operands() + 1); - blocks_.resize(get_num_operands() + 1); - set_incoming_value(get_num_operands() - 1, v); - set_incoming_block(get_num_operands() - 1, block); -} - -// Factory methods -phi_node* phi_node::create(type *ty, unsigned num_reserved, const std::string &name, instruction *next){ - return new phi_node(ty, num_reserved, name, next); -} - -//===----------------------------------------------------------------------===// -// call_inst classes -//===----------------------------------------------------------------------===// - -std::string call_inst::repr_impl() const { return "call " + fn_->get_name(); } - -call_inst::call_inst(ir::function* fn, const std::vector& values, const std::string& name, instruction* next) - : instruction(fn->get_fn_type()->get_return_ty(), INST_CALL, values.size(), name, next), fn_(fn){ - for(size_t i = 0; i < values.size(); i++) - set_operand(i, values.at(i)); -} - -call_inst* call_inst::create(ir::function* fn, const std::vector& values, const std::string &name, instruction *next) { - return new call_inst(fn, values, name, next); -} - - -// launch - -launch_inst::launch_inst(ir::function* fn, const std::vector& values, const std::vector& grid, ir::value* num_warps, const std::string& name, instruction* next) - : instruction(fn->get_fn_type()->get_return_ty(), INST_LAUNCH, 1 + values.size() + grid.size() + 1, name, next){ - int k = 0; - if(grid.size() != 3) - throw std::runtime_error("grid must have 3 elements"); - set_operand(k++, fn); - val_begin = k; - for(ir::value* v: values) - set_operand(k++, v); - val_end = k; - grid_begin 
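- // Operand layout set up by this constructor (descriptive summary):
- //   operand 0                        : the callee function
- //   operands [val_begin, val_end)    : kernel argument values
- //   operands [grid_begin, grid_end)  : grid dimensions (exactly 3 required)
- //   operand grid_end                 : num_warps
- // The recorded index ranges are what get_values()/get_grid()/get_num_warps()
- // below use to recover each group.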
= k; - for(ir::value* g: grid) - set_operand(k++, g); - grid_end = k; - set_operand(k++, num_warps); -} - - -ir::function* launch_inst::get_fn() { - return (ir::function*)get_operand(0); -} - -std::vector launch_inst::get_values() { - std::vector ret; - for(int i = val_begin; i < val_end; i++) - ret.push_back(get_operand(i)); - return ret; -} - -std::vector launch_inst::get_grid() { - std::vector ret; - for(int i = grid_begin; i < grid_end; i++) - ret.push_back(get_operand(i)); - return ret; -} - -ir::value* launch_inst::get_num_warps() { - return get_operand(grid_end); -} - - -launch_inst* launch_inst::create(ir::function *fn, const std::vector &values, const std::vector &grid, ir::value *num_warps, const std::string &name, instruction *next) { - return new launch_inst(fn, values, grid, num_warps, name, next); -} - - -//===----------------------------------------------------------------------===// -// binary_operator classes -//===----------------------------------------------------------------------===// - -std::string binary_operator::repr_impl() const { - switch(op_) { - case Add : return "add"; - case FAdd : return "fadd"; - case Sub : return "sub"; - case FSub : return "fsub"; - case Mul : return "mul"; - case FMul : return "fmul"; - case UDiv : return "udiv"; - case SDiv : return "sdiv"; - case FDiv : return "fdiv"; - case URem : return "urem"; - case SRem : return "srem"; - case FRem : return "frem"; - case Shl : return "shl"; - case LShr : return "lshr"; - case AShr : return "ashr"; - case And : return "and"; - case Or : return "or"; - case Xor : return "xor"; - default: throw std::runtime_error("unknown binary operator"); - } -} - -bool binary_operator::is_int_div() const { - return op_ == binary_op_t::UDiv || op_ == binary_op_t::SDiv; -} - -bool binary_operator::is_int_rem() const { - return op_ == binary_op_t::URem || op_ == binary_op_t::SRem; -} - -bool binary_operator::is_shl() const { - return op_ == binary_op_t::Shl; -} - -bool binary_operator::is_shr() const { - return op_ == binary_op_t::LShr || op_ == binary_op_t::AShr; -} - -bool binary_operator::is_int_mult() const { - return op_ == binary_op_t::Mul; -} - -bool binary_operator::is_int_add_sub() const { - return op_ == binary_op_t::Add || op_ == binary_op_t::Sub; -} - - -binary_operator::binary_operator(binary_op_t op, value *lhs, value *rhs, type *ty, const std::string &name, instruction *next) - : instruction(ty, INST_BINOP, 2, name, next), op_(op), fdiv_ieee_rnd_(false){ - set_operand(0, lhs); - set_operand(1, rhs); -} - -binary_operator *binary_operator::create(binary_op_t op, value *lhs, value *rhs, const std::string &name, instruction *next){ - assert(lhs->get_type() == rhs->get_type() && - "Cannot create binary operator with two operands of differing type!"); - return new binary_operator(op, lhs, rhs, lhs->get_type(), name, next); -} - -//binary_operator *binary_operator::create_fneg(value *arg, const std::string &name, instruction *next){ -// assert(arg->get_type()->get_scalar_ty()->is_floating_point_ty()); -// value *zero = constant_fp::get_zero_value_for_negation(arg->get_type()); -// return binary_operator::create(binary_op_t::FSub, zero, arg, name, next); -//} - -//binary_operator *binary_operator::create_neg(value *arg, const std::string &name, instruction *next){ -// assert(arg->get_type()->get_scalar_ty()->is_integer_ty()); -// value *zero = constant_fp::get_zero_value_for_negation(arg->get_type()->get_scalar_ty()); -// return binary_operator::create(binary_op_t::Sub, zero, arg, name, next); -//} - 
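- // (Descriptive note on the commented-out helpers here: they express negation
- // in terms of the existing binary ops, i.e. fneg as 0.0 - x via FSub, neg as
- // 0 - x via Sub, and bitwise not as x ^ all-ones via Xor.)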
-//binary_operator *binary_operator::create_not(value *arg, const std::string &name, instruction *next){ -// assert(arg->get_type()->is_integer_ty()); -// constant *mask = constant::get_all_ones_value(arg->get_type()); -// return binary_operator::create(binary_op_t::Xor, arg, mask, name, next); -//} - -//===----------------------------------------------------------------------===// -// cmp_inst classes -//===----------------------------------------------------------------------===// - - - -// cmp_inst -std::string cmp_inst::repr_impl() const { - switch (pred_) { - case FCMP_FALSE : return "false"; - case FCMP_OEQ : return "fcmp_oeq"; - case FCMP_OGT : return "fcmp_ogt"; - case FCMP_OGE : return "fcmp_oge"; - case FCMP_OLT : return "fcmp_olt"; - case FCMP_OLE : return "fcmp_ole"; - case FCMP_ONE : return "fcmp_one"; - case FCMP_ORD : return "fcmp_ord"; - case FCMP_UNO : return "fcmp_uno"; - case FCMP_UEQ : return "fcmp_ueq"; - case FCMP_UGT : return "fcmp_ugt"; - case FCMP_UGE : return "fcmp_uge"; - case FCMP_ULT : return "fcmp_ult"; - case FCMP_ULE : return "fcmp_ule"; - case FCMP_UNE : return "fcmp_une"; - case FCMP_TRUE : return "true"; - case ICMP_EQ : return "icmp_eq"; - case ICMP_NE : return "icmp_ne"; - case ICMP_UGT : return "icmp_ugt"; - case ICMP_UGE : return "icmp_uge"; - case ICMP_ULT : return "icmp_ult"; - case ICMP_ULE : return "icmp_ule"; - case ICMP_SGT : return "icmp_sgt"; - case ICMP_SGE : return "icmp_sge"; - case ICMP_SLT : return "icmp_slt"; - case ICMP_SLE : return "icmp_sle"; - default: throw std::runtime_error("unreachable"); - } -} - -cmp_inst::cmp_inst(type *ty, value_id_t id, cmp_pred_t pred, value *lhs, value *rhs, const std::string &name, instruction *next) - : instruction(ty, id, 2, name, next), pred_(pred) { - set_operand(0, lhs); - set_operand(1, rhs); -} - -type* cmp_inst::make_cmp_result_type(type *ty){ - type* int1_ty = type::get_int1_ty(ty->get_context()); - if (block_type* tile_ty = dynamic_cast(ty)) - return block_type::get_same_shapes(int1_ty, tile_ty); - return int1_ty; -} - - -bool cmp_inst::is_fp_predicate(cmp_pred_t pred) { - return pred >= FIRST_FCMP_PREDICATE && pred <= LAST_FCMP_PREDICATE; -} - -bool cmp_inst::is_int_predicate(cmp_pred_t pred) { - return pred >= FIRST_ICMP_PREDICATE && pred <= LAST_ICMP_PREDICATE; -} - - -// icmp_inst -icmp_inst::icmp_inst(type *ty, cmp_pred_t pred, - value *lhs, value *rhs, const std::string &name, instruction *next) - : cmp_inst(ty, INST_ICMP, pred, lhs, rhs, name, next){ } - -icmp_inst* icmp_inst::create(cmp_pred_t pred, value *lhs, value *rhs, const std::string &name, instruction *next){ - assert(is_int_predicate(pred)); - assert(lhs->get_type() == rhs->get_type()); - type *res_ty = make_cmp_result_type(lhs->get_type()); - return new icmp_inst(res_ty, pred, lhs, rhs, name, next); -} - -// fcmp_inst -fcmp_inst::fcmp_inst(type *ty, cmp_pred_t pred, - value *lhs, value *rhs, const std::string &name, instruction *next) - : cmp_inst(ty, INST_FCMP, pred, lhs, rhs, name, next){ } - -fcmp_inst* fcmp_inst::create(cmp_pred_t pred, value *lhs, value *rhs, const std::string &name, instruction *next){ - assert(is_fp_predicate(pred)); - type *res_ty = make_cmp_result_type(lhs->get_type()); - return new fcmp_inst(res_ty, pred, lhs, rhs, name, next); -} - -//===----------------------------------------------------------------------===// -// unary_inst classes -//===----------------------------------------------------------------------===// - -unary_inst::unary_inst(type *ty, value_id_t id, value *v, const std::string &name, 
instruction *next) - : instruction(ty, id, 1, name, next) { - set_operand(0, v); -} - -//===----------------------------------------------------------------------===// -// dequantize_inst classes -//===----------------------------------------------------------------------===// - -dequantize_inst::dequantize_inst(type *ty, value *v, value *scale, value *shift, const std::string &name, instruction *next) - : instruction(ty, INST_DEQUANTIZE, 3, name, next) { - set_operand(0, v); - set_operand(1, scale); - set_operand(2, shift); -} - -dequantize_inst *dequantize_inst::create(value *arg, value *scale, value *shift, type *ty, const std::string &name, instruction *next){ - return new dequantize_inst(ty, arg, scale, shift, name, next); -} - -//===----------------------------------------------------------------------===// -// cast_inst classes -//===----------------------------------------------------------------------===// - -std::string cast_inst::repr_impl() const { - switch (op_){ - case cast_op_t::Trunc: return "trunc"; - case cast_op_t::ZExt: return "zext"; - case cast_op_t::SExt: return "sext"; - case cast_op_t::FPTrunc: return "fp_trunc"; - case cast_op_t::FPExt: return "fp_ext"; - case cast_op_t::UIToFP: return "ui_to_fp"; - case cast_op_t::SIToFP: return "si_to_fp"; - case cast_op_t::FPToUI: return "fp_to_ui"; - case cast_op_t::FPToSI: return "fp_to_si"; - case cast_op_t::PtrToInt: return "ptr_to_int"; - case cast_op_t::IntToPtr: return "int_to_ptr"; - case cast_op_t::BitCast: return "bitcast"; - case cast_op_t::AddrSpaceCast: return "addr_space_cast"; - default: throw std::runtime_error("unreachable"); - } -} -// TODO -bool cast_inst::is_valid(cast_op_t op, value *arg, type *ty) { - assert(arg->get_type()->is_block_ty() == ty->is_block_ty()); - return true; -} - -cast_inst *cast_inst::create(cast_op_t op, value *arg, type *ty, const std::string &name, instruction *next){ - assert(is_valid(op, arg, ty) && "Invalid cast!"); - // Construct and return the appropriate CastInst subclass - switch (op) { - case cast_op_t::Trunc: return new trunc_inst (ty, arg, name, next); - case cast_op_t::ZExt: return new z_ext_inst (ty, arg, name, next); - case cast_op_t::SExt: return new s_ext_inst (ty, arg, name, next); - case cast_op_t::FPTrunc: return new fp_trunc_inst (ty, arg, name, next); - case cast_op_t::FPExt: return new fp_ext_inst (ty, arg, name, next); - case cast_op_t::UIToFP: return new ui_to_fp_inst (ty, arg, name, next); - case cast_op_t::SIToFP: return new si_to_fp_inst (ty, arg, name, next); - case cast_op_t::FPToUI: return new fp_to_ui_inst (ty, arg, name, next); - case cast_op_t::FPToSI: return new fp_to_si_inst (ty, arg, name, next); - case cast_op_t::PtrToInt: return new ptr_to_int_inst (ty, arg, name, next); - case cast_op_t::IntToPtr: return new int_to_ptr_inst (ty, arg, name, next); - case cast_op_t::BitCast: return new bit_cast_inst (ty, arg, name, next); - case cast_op_t::AddrSpaceCast: return new addr_space_cast_inst (ty, arg, name, next); - default: throw std::runtime_error("unreachable"); - } -} - -cast_inst *cast_inst::create_integer_cast(value *arg, type *ty, bool is_signed, const std::string &name, instruction *next){ - type *arg_ty = arg->get_type(); - assert(arg_ty->is_int_or_tileint_ty() && ty->is_int_or_tileint_ty() && "Invalid integer cast!"); - unsigned arg_bits = arg_ty->get_scalar_ty()->get_integer_bitwidth(); - unsigned dst_bits = ty->get_scalar_ty()->get_integer_bitwidth(); - cast_op_t op = (arg_bits == dst_bits ? cast_op_t::BitCast : - (arg_bits > dst_bits ? 
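- // Cast opcode selection, spelled out:
- //   arg_bits == dst_bits -> BitCast
- //   arg_bits >  dst_bits -> Trunc
- //   arg_bits <  dst_bits -> SExt when is_signed, ZExt otherwise
- // e.g. an i16 -> i32 cast with is_signed == true yields an s_ext_inst.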
cast_op_t::Trunc : - (is_signed ? cast_op_t::SExt : cast_op_t::ZExt))); - return create(op, arg, ty, name, next); -} - -//===----------------------------------------------------------------------===// -// terminator_inst classes -//===----------------------------------------------------------------------===// - - -// return_inst -return_inst::return_inst(context &ctx, value *ret_val, instruction *next) - : terminator_inst(ret_val?ret_val->get_type():type::get_void_ty(ctx), INST_RETURN, ret_val!=nullptr, "", next){ - if(ret_val) - set_operand(0, ret_val); -} - -return_inst *return_inst::create(context &ctx, value *ret_val, instruction *next){ - return new return_inst(ctx, ret_val, next); -} - - -// branch_inst -branch_inst* branch_inst::create(basic_block *dst, instruction *next) { - assert(dst && "Branch destination may not be null!"); - return new uncond_branch_inst(dst, next); -} - -branch_inst* branch_inst::create(value *cond, basic_block *if_dst, basic_block *else_dst, instruction *next) { - assert(cond->get_type()->is_integer_ty(1) && "May only branch on boolean predicates!"); - return new cond_branch_inst(if_dst, else_dst, cond, next); -} - -// uncond_branch_inst -uncond_branch_inst::uncond_branch_inst(basic_block *dst, instruction *next) - : branch_inst(type::get_void_ty(dst->get_context()), INST_UNCOND_BRANCH, 1, "", next){ - set_operand(0, dst); -} - -// cond_branch_inst -cond_branch_inst::cond_branch_inst(basic_block *if_dst, basic_block *else_dst, value *cond, instruction *next) - : branch_inst(type::get_void_ty(if_dst->get_context()), INST_COND_BRANCH, 3, "", next){ - assert(cond->get_type()->is_integer_ty(1) && "May only branch on boolean predicates!"); - set_operand(0, if_dst); - set_operand(1, else_dst); - set_operand(2, cond); -} - - -//===----------------------------------------------------------------------===// -// getelementptr_inst classes -//===----------------------------------------------------------------------===// - -getelementptr_inst::getelementptr_inst(type *pointee_ty, value *ptr, const std::vector &idx, const std::string &name, instruction *next) - : instruction(get_return_type(pointee_ty, ptr, idx), INST_GETELEMENTPTR, 1 + idx.size(), name, next), - source_elt_ty(pointee_ty), - res_elt_ty(get_indexed_type(pointee_ty, idx)){ - // sanity check - type *expected_ty = get_type()->get_scalar_ty(); - expected_ty = ((pointer_type*)expected_ty)->get_element_ty(); - assert(res_elt_ty == expected_ty); - // set operands - set_operand(0, ptr); - for(size_t i = 0; i < idx.size(); i++) - set_operand(1 + i, idx[i]); -} - -type *getelementptr_inst::get_return_type(type *elt_ty, value *x, const std::vector &idx_list) { - // result pointer type - type *ty = x->get_type(); - unsigned addr_space = ty->get_scalar_ty()->get_pointer_address_space(); - type *ptr_ty = pointer_type::get(get_indexed_type(elt_ty, idx_list), addr_space); - // Tile GEP - if(ty->is_block_ty()) - return block_type::get_same_shapes(ptr_ty, ty); - for(value *idx : idx_list) - if (idx->get_type()->is_block_ty()) - return block_type::get_same_shapes(ptr_ty, ty); - // Scalar GEP - return ptr_ty; -} - -type *getelementptr_inst::get_indexed_type_impl(type *ty, const std::vector &idx_list) { - if(idx_list.empty()) - return ty; - if(!ty->is_sized()) - return nullptr; - unsigned cur_idx = 1; - for(; cur_idx != idx_list.size(); cur_idx++){ - composite_type *cty = dynamic_cast(ty); - if(!cty || cty->is_pointer_ty()) - break; - value *idx = idx_list[cur_idx]; - if(!cty->index_valid(idx)) - break; - ty = 
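- // Descriptive note on GEP typing above: get_return_type() wraps the result
- // pointer in a block_type of the same shape whenever the base pointer or any
- // index is a block, so a GEP over a tensor of pointers stays a tensor; only
- // the all-scalar case returns a plain pointer type.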
cty->get_type_at_index(idx); - } - return (cur_idx == idx_list.size())? ty : nullptr; -} - -type *getelementptr_inst::get_indexed_type(type *ty, const std::vector &idx_list) { - type *result = get_indexed_type_impl(ty, idx_list); - assert(result && "invalid GEP type!"); - return result; -} - -getelementptr_inst *getelementptr_inst::create(value *ptr, const std::vector &idx, const std::string &name, instruction *next) { - type *pointee_ty = ((pointer_type*)(ptr->get_type()->get_scalar_ty()))->get_element_ty(); - return new getelementptr_inst(pointee_ty, ptr, idx, name, next); -} - - -//===----------------------------------------------------------------------===// -// load_inst/store_inst classes -//===----------------------------------------------------------------------===// - -// io_inst -io_inst::io_inst(type *ty, value_id_t id, unsigned num_ops, EVICTION_POLICY eviction, const std::string &name, instruction *next) - : instruction(ty, id, num_ops, name, next), eviction_(eviction) -{ } - -// load_inst -load_inst::load_inst(value *ptr, value_id_t id, unsigned num_ops, load_inst::CACHE_MODIFIER cache, EVICTION_POLICY eviction, bool is_volatile, const std::string &name, instruction *next) - : io_inst(get_pointee_type(ptr->get_type()), id, num_ops, eviction, name, next), cache_(cache), is_volatile_(is_volatile) -{ } - -// load -type *load_inst::get_pointee_type(type *ty) { - type *scalar_ty = ty->get_scalar_ty(); - type *pointee_ty = scalar_ty->get_pointer_element_ty(); - if(ty->is_block_ty()) - return block_type::get_same_shapes(pointee_ty, ty); - return pointee_ty; -} - -// unmasked_load -unmasked_load_inst::unmasked_load_inst(value *ptr, load_inst::CACHE_MODIFIER cache,load_inst::EVICTION_POLICY eviction, bool is_volatile, const std::string &name, instruction *next) - : load_inst(ptr, INST_UNMASKED_LOAD, 1, cache, eviction, is_volatile, name, next) { - set_operand(0, ptr); -} - -unmasked_load_inst* unmasked_load_inst::create(value *ptr, load_inst::CACHE_MODIFIER cache, load_inst::EVICTION_POLICY eviction, bool is_volatile, const std::string &name, instruction *next) { - return new unmasked_load_inst(ptr, cache, eviction, is_volatile, name, next); -} - -// masked load -masked_load_inst::masked_load_inst(value *ptr, value *mask, value *false_value, load_inst::CACHE_MODIFIER cache, load_inst::EVICTION_POLICY eviction, - bool is_volatile, - const std::string &name, instruction *next) - : load_inst(ptr, INST_MASKED_LOAD, 3, cache, eviction, is_volatile, name, next) { - set_operand(0, ptr); - set_operand(1, mask); - set_operand(2, false_value); -} - -masked_load_inst* masked_load_inst::create(value *ptr, value *mask, value *false_value, - load_inst::CACHE_MODIFIER cache, load_inst::EVICTION_POLICY eviction, - bool is_volatile, - const std::string &name, instruction *next) { - return new masked_load_inst(ptr, mask, false_value, cache, eviction, is_volatile, name, next); -} - -// masked load async -masked_load_async_inst::masked_load_async_inst(value *ptr, value *mask, value *false_value, - load_inst::CACHE_MODIFIER cache, load_inst::EVICTION_POLICY eviction, - const std::string &name, instruction *next) - : load_inst(ptr, INST_MASKED_LOAD_ASYNC, 3, cache, eviction, false, name, next) { - set_operand(0, ptr); - set_operand(1, mask); - set_operand(2, false_value); -} - -masked_load_async_inst* masked_load_async_inst::create(value *ptr, value *mask, value *false_value, - load_inst::CACHE_MODIFIER cache, EVICTION_POLICY eviction, - const std::string &name, instruction *next) { - return new 
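- // Descriptive note on the load instructions above: masked variants carry three
- // operands (0 = pointer, 1 = mask, 2 = the value substituted where the mask is
- // false), while the cache modifier, eviction policy and volatility travel as
- // instruction attributes rather than operands.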
masked_load_async_inst(ptr, mask, false_value, cache, eviction, name, next); -} - -// store - -store_inst::store_inst(value *ptr, value_id_t id, unsigned num_ops, EVICTION_POLICY eviction, const std::string &name, instruction *next) - : io_inst(type::get_void_ty(ptr->get_type()->get_context()), id, num_ops, eviction, name, next) -{ } - -// unmasked_store -unmasked_store_inst::unmasked_store_inst(value *ptr, value *val, EVICTION_POLICY eviction, - const std::string &name, instruction *next) - : store_inst(ptr, INST_UNMASKED_STORE, 2, eviction, name, next) { - set_operand(0, ptr); - set_operand(1, val); -} - -unmasked_store_inst* unmasked_store_inst::create(value *ptr, value *val, EVICTION_POLICY eviction, - const std::string &name, instruction *next) { - return new unmasked_store_inst(ptr, val, eviction, name, next); -} - -// masked store -masked_store_inst::masked_store_inst(value *ptr, value *val, value *mask, EVICTION_POLICY eviction, - const std::string &name, instruction *next) - : store_inst(ptr, INST_MASKED_STORE, 3, eviction, name, next) { - set_operand(0, ptr); - set_operand(1, val); - set_operand(2, mask); -} - -masked_store_inst* masked_store_inst::create(value *ptr, value *val, value *mask, EVICTION_POLICY eviction, - const std::string &name, instruction *next) { - return new masked_store_inst(ptr, val, mask, eviction, name, next); -} - -//===----------------------------------------------------------------------===// -// struct classes -//===----------------------------------------------------------------------===// - -// insert value - -insert_value_inst::insert_value_inst(value *val, value *elt, size_t idx, const std::string& name, instruction *next) - : instruction(val->get_type(), INST_INSERT_VALUE, 2, name, next), idx_(idx) { - set_operand(0, val); - set_operand(1, elt); -} - -insert_value_inst* insert_value_inst::create(value *val, value *elt, size_t idx, const std::string& name, instruction *next){ - return new insert_value_inst(val, elt, idx, name, next); -} - - -// extract value - -extract_value_inst::extract_value_inst(value *val, size_t idx, const std::string& name, instruction *next) - : instruction(val->get_type()->get_struct_type(idx), INST_EXTRACT_VALUE, 1, name, next), idx_(idx) { - set_operand(0, val); -} - -extract_value_inst* extract_value_inst::create(value *val, size_t idx, const std::string& name, instruction *next){ - return new extract_value_inst(val, idx, name, next); -} - - -//===----------------------------------------------------------------------===// -// retile_inst classes -//===----------------------------------------------------------------------===// - -// cat - -cat_inst::cat_inst(value *x, value *y, const std::string &name, instruction *next) - : instruction(block_type::get(x->get_type()->get_scalar_ty(), - {x->get_type()->get_block_shapes()[0] + - y->get_type()->get_block_shapes()[0] }), INST_CAT, 2, name, next) { - set_operand(0, x); - set_operand(1, y); -} - -instruction* cat_inst::create(value *lhs, value *rhs, const std::string &name, instruction *next) { - return new cat_inst(lhs, rhs, name, next); -} - -// retile - -retile_inst::retile_inst(value *arg, value_id_t id, const type::block_shapes_t &shapes, - const std::string &name, instruction *next) - : unary_inst(block_type::get(arg->get_type()->get_scalar_ty(), shapes), id, arg, name, next) { } - - - -// reshape - -instruction* reshape_inst::create(value *arg, const type::block_shapes_t &shapes, - const std::string &name, instruction *next) { - return new reshape_inst(arg, INST_RESHAPE, 
shapes, name, next); -} - - -// splat - -instruction* splat_inst::create(value *arg, const type::block_shapes_t &shapes, - const std::string &name, instruction *next) { - return new splat_inst(arg, INST_SPLAT, shapes, name, next); -} - -// broadcast - -instruction* broadcast_inst::create(value *arg, const type::block_shapes_t &shapes, - const std::string &name, instruction *next) { - return new broadcast_inst(arg, INST_BROADCAST, shapes, name, next); -} - -// downcast - -instruction* downcast_inst::create(value *arg, const std::string &name, instruction *next) { - return new downcast_inst(arg->get_type()->get_scalar_ty(), INST_DOWNCAST, arg, name, next); -} - - - - -//===----------------------------------------------------------------------===// -// matmul_inst classes -//===----------------------------------------------------------------------===// - -dot_inst::dot_inst(value *A, value *B, value *C, TransT AT, TransT BT, bool allow_tf32, - const std::string &name, instruction *next) - : builtin_inst(C->get_type(), INST_DOT, 3, name, next), AT_(AT), BT_(BT){ - set_operand(0, A); - set_operand(1, B); - set_operand(2, C); - allow_tf32_ = allow_tf32; -} - -instruction *dot_inst::create(value *A, value *B, value *C, - bool AT, bool BT, bool allow_tf32, - const std::string &name, instruction *next) { - TransT OPA = AT ? Trans : NoTrans; - TransT OPB = BT ? Trans : NoTrans; - return new dot_inst(A, B, C, OPA, OPB, allow_tf32, name, next); -} - -instruction *dot_inst::create_nn(value *A, value *B, value *C, bool allow_tf32, - const std::string &name, instruction *next) { - return new dot_inst(A, B, C, NoTrans, NoTrans, allow_tf32, name, next); -} - -instruction *dot_inst::create_nt(value *A, value *B, value *C, bool allow_tf32, - const std::string &name, instruction *next) { - return new dot_inst(A, B, C, NoTrans, Trans, allow_tf32, name, next); -} - -instruction *dot_inst::create_tn(value *A, value *B, value *C, bool allow_tf32, - const std::string &name, instruction *next) { - return new dot_inst(A, B, C, Trans, NoTrans, allow_tf32, name, next); -} - -instruction *dot_inst::create_tt(value *A, value *B, value *C, bool allow_tf32, - const std::string &name, instruction *next) { - return new dot_inst(A, B, C, Trans, Trans, allow_tf32, name, next); -} - -//===----------------------------------------------------------------------===// -// trans instructions -//===----------------------------------------------------------------------===// - -ir::type* trans_inst::get_res_ty(ir::type* ty, std::vector perm) { - // get argument shapes - ir::block_type::block_shapes_t arg_shapes = ty->get_block_shapes(); - // permutate argument shapes - perm = init_perm(ty, perm); - ir::block_type::block_shapes_t res_shapes = arg_shapes; - for(size_t i = 0; i < perm.size(); i++) - res_shapes[i] = arg_shapes[perm[i]]; - // construct type - return block_type::get(ty->get_scalar_ty(), res_shapes); -} - -std::vector trans_inst::init_perm(ir::type* ty, const std::vector& perm) { - if(!perm.empty()) - return perm; - auto size = ty->get_block_shapes().size(); - std::vector result; - result.push_back(size - 1); - for(size_t i = 0; i < size - 1; i++) - result.push_back(i); - return result; -} - -trans_inst::trans_inst(value *arg, const std::vector &perm, const std::string &name, instruction *next) - : builtin_inst(get_res_ty(arg->get_type(), perm), INST_TRANS, 1, name, next) { - // sanity check - perm_ = init_perm(arg->get_type(), perm); - //auto size = arg->get_type()->get_tile_shapes().size(); - //assert(perm_.size() == size); 
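- // When no permutation is supplied, init_perm() above builds the default
- // [rank-1, 0, 1, ..., rank-2]; e.g. for a rank-3 block the permutation is
- // {2, 0, 1}, so get_res_ty() maps a shape (A, B, C) to (C, A, B).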
- set_operand(0, arg); -} - -instruction* trans_inst::create(value *arg, const std::vector &perm, const std::string &name, instruction *next) { - return new trans_inst(arg, perm, name, next); -} - -const std::vector trans_inst::get_perm() const { - return perm_; -} - -//===----------------------------------------------------------------------===// -// sqrt instructions -//===----------------------------------------------------------------------===// - -sqrt_inst::sqrt_inst(value *arg, const std::string &name, instruction *next) - : builtin_inst(arg->get_type(), INST_SQRT, 1, name, next){ - set_operand(0, arg); -} - -instruction* sqrt_inst::create(value *arg, const std::string &name, instruction *next) { - return new sqrt_inst(arg, name, next); -} - -//===----------------------------------------------------------------------===// -// reduce instructions -//===----------------------------------------------------------------------===// - -std::string reduce_inst::to_str(op_t op) { - switch (op) { - case ADD: return "+"; - case SUB: return "-"; - case MAX: return "imax"; - case MIN: return "imin"; - case FADD: return "+"; - case FSUB: return "-"; - case FMAX: return "fmax"; - case FMIN: return "fmin"; - default: break; - } - assert(false); - return ""; -} - -type* reduce_inst::get_res_type(value *arg, unsigned axis) { - ir::block_type::block_shapes_t shapes = arg->get_type()->get_block_shapes(); - shapes.erase(shapes.begin() + axis); - type *scalar_ty = arg->get_type()->get_scalar_ty(); - if(shapes.empty()) -// shapes.push_back(1); - return scalar_ty; - return block_type::get(scalar_ty, shapes); -} - -reduce_inst::reduce_inst(value *arg, op_t op, unsigned axis, const std::string &name, instruction *next) - : builtin_inst(get_res_type(arg, axis), INST_REDUCE, 1, name, next), - op_(op), - axis_(axis){ - set_operand(0, arg); -} - -instruction* reduce_inst::create(value *arg, op_t op, unsigned axis, const std::string &name, instruction *next) { - return new reduce_inst(arg, op, axis, name, next); -} - - -//===----------------------------------------------------------------------===// -// select instructions -//===----------------------------------------------------------------------===// - -select_inst::select_inst(value *pred, value *if_value, value *else_value, const std::string &name, instruction *next) - : builtin_inst(if_value->get_type(), INST_SELECT, 3, name, next){ - set_operand(0, pred); - set_operand(1, if_value); - set_operand(2, else_value); -} - -instruction* select_inst::create(value *pred, value *if_value, value *else_value, const std::string &name, instruction *next) { - return new select_inst(pred, if_value, else_value, name, next); -} -//===----------------------------------------------------------------------===// -// builtin instructions -//===----------------------------------------------------------------------===// - - -// get_program_id -get_program_id_inst::get_program_id_inst(type *ty, unsigned axis, const std::string &name, instruction *next) - : builtin_inst(ty, INST_GET_PROGRAM_ID, 0, name, next), axis_(axis){ - -} - -instruction* get_program_id_inst::create(context &ctx, unsigned axis, const std::string &name, instruction *next) { - return new get_program_id_inst(type::get_int32_ty(ctx), axis, name, next); -} - -// get_num_program -get_num_programs_inst::get_num_programs_inst(type *ty, unsigned axis, const std::string &name, instruction *next) - : builtin_inst(ty, INST_GET_NUM_PROGRAMS, 0, name, next), axis_(axis){ - -} - -instruction* 
get_num_programs_inst::create(context &ctx, unsigned axis, const std::string &name, instruction *next) { - return new get_num_programs_inst(type::get_int32_ty(ctx), axis, name, next); -} - -// atomic_rmw - -atomic_rmw_inst::atomic_rmw_inst(atomic_rmw_op_t op, value *ptr, value *val, value *msk, const std::string &name, instruction *next) - : atomic_inst(ptr->get_type()->get_pointer_element_ty(), INST_ATOMIC_RMW, 3, name, next), op_(op) { - set_operand(0, ptr); - set_operand(1, val); - set_operand(2, msk); -} - -instruction* atomic_rmw_inst::create(atomic_rmw_op_t op, value *ptr, value *val, value *msk, const std::string &name, instruction *next) { - return new atomic_rmw_inst(op, ptr, val, msk, name, next); -} - - -// atomic cas - -atomic_cas_inst::atomic_cas_inst(value *ptr, value *cmp, value *val, const std::string &name, instruction *next) - : atomic_inst(ptr->get_type()->get_pointer_element_ty(), INST_ATOMIC_CAS, 3, name, next) { - set_operand(0, ptr); - set_operand(1, cmp); - set_operand(2, val); -} - -instruction* atomic_cas_inst::create(value *ptr, value *cmp, value *val, const std::string &name, instruction *next) { - return new atomic_cas_inst(ptr, cmp, val, name, next); -} - - -// umulhi - -umulhi_inst::umulhi_inst(value *lhs, value *rhs, const std::string &name, instruction *next) - : builtin_inst(lhs->get_type(), INST_UMULHI, 2, name, next) { - set_operand(0, lhs); - set_operand(1, rhs); -} - -instruction* umulhi_inst::create(value *lhs, value *rhs, const std::string &name, instruction *next) { - return new umulhi_inst(lhs, rhs, name, next); -} - - -// exp - -exp_inst::exp_inst(value *val, const std::string &name, instruction *next) - : builtin_inst(val->get_type(), INST_EXP, 1, name, next) { - set_operand(0, val); -} - -instruction* exp_inst::create(value *val, const std::string& name, instruction *next) { - return new exp_inst(val, name, next); -} - -// cos -cos_inst::cos_inst(value *val, const std::string &name, instruction *next) - : builtin_inst(val->get_type(), INST_COS, 1, name, next) { - set_operand(0, val); -} - -instruction* cos_inst::create(value *val, const std::string& name, instruction *next) { - return new cos_inst(val, name, next); -} - -// sin -sin_inst::sin_inst(value *val, const std::string &name, instruction *next) - : builtin_inst(val->get_type(), INST_SIN, 1, name, next) { - set_operand(0, val); -} - -instruction* sin_inst::create(value *val, const std::string& name, instruction *next) { - return new sin_inst(val, name, next); -} - - -// log - -log_inst::log_inst(value *val, const std::string &name, instruction *next) - : builtin_inst(val->get_type(), INST_LOG, 1, name, next) { - set_operand(0, val); -} - -instruction* log_inst::create(value *val, const std::string& name, instruction *next) { - return new log_inst(val, name, next); -} - - -//===----------------------------------------------------------------------===// -// intrinsic instructions -//===----------------------------------------------------------------------===// - -// cvt_scanline -cvt_layout_inst* cvt_layout_inst::create(value *arg, const std::string &name, instruction *next) { - return new cvt_layout_inst(arg->get_type(), INST_CVT_LAYOUT, arg, name, next); -} - -// copy to shared -copy_to_shared_inst* copy_to_shared_inst::create(value *arg, const std::string &name, - instruction *next) { - return new copy_to_shared_inst(arg->get_type(), INST_COPY_TO_SHARED, arg, name, next); -} - -// copy from shared -copy_from_shared_inst* copy_from_shared_inst::create(value *arg, const std::string &name, 
- instruction *next) { - return new copy_from_shared_inst(arg->get_type(), INST_COPY_FROM_SHARED, arg, name, next); -} - -// barrier -barrier_inst::barrier_inst(context &ctx, const std::string &name, instruction *next) - : instruction(type::get_void_ty(ctx), INST_BARRIER, 0, name, next) { } - -barrier_inst* barrier_inst::create(context &ctx, const std::string &name, instruction *next) { - return new barrier_inst(ctx, name, next); -} - -async_wait_inst::async_wait_inst(context &ctx, int N, const std::string &name, instruction *next) - : instruction(type::get_void_ty(ctx), INST_ASYNC_WAIT, 0, name, next), N_(N) { } - -async_wait_inst* async_wait_inst::create(context &ctx, int N, const std::string &name, instruction *next) { - return new async_wait_inst(ctx, N, name, next); -} - -// prefetch_s -prefetch_s_inst *prefetch_s_inst::create(context &ctx, value *arg, int inc, const std::string &name, instruction *next) { - return new prefetch_s_inst(ctx, arg, inc, name, next); -} - -// global timer -globaltimer_inst::globaltimer_inst(context &ctx, const std::string &name, instruction *next) - : instruction(type::get_int64_ty(ctx), INST_GLOBALTIMER, 0, name, next) { } - -globaltimer_inst* globaltimer_inst::create(context &ctx, const std::string &name, instruction *next) { - return new globaltimer_inst(ctx, name, next); -} - -// extern elementwise -extern_elementwise_inst::extern_elementwise_inst( - context &ctx, const std::vector &args, type *ret_ty, - const std::string &lib_name, const std::string &lib_path, - const std::string &symbol_name, const std::string &name, instruction *next) - : instruction(ret_ty, INST_EXTERN_ELEMENTWISE, args.size(), name, next), - lib_name_(lib_name), - lib_path_(lib_path), - symbol_name_(symbol_name) { - for (size_t i = 0; i < args.size(); i++) { - set_operand(i, args[i]); - } -} - -extern_elementwise_inst *extern_elementwise_inst::create( - context &ctx, const std::vector &args, type *ret_ty, - const std::string &lib_name, const std::string &lib_path, - const std::string &symbol_name, const std::string &name, - instruction *next) { - return new extern_elementwise_inst(ctx, args, ret_ty, lib_name, lib_path, - symbol_name, name, next); -} - -// clock -clock_inst::clock_inst(context &ctx, const std::string &name, instruction *next) - : instruction(type::get_int64_ty(ctx), INST_CLOCK, 0, name, next) { } - -clock_inst* clock_inst::create(context &ctx, const std::string &name, instruction *next) { - return new clock_inst(ctx, name, next); -} - - -// make_range -make_range::make_range(type *ty, constant_int *first, constant_int *last) - : instruction(ty, INST_MAKE_RANGE, 0), first_(first), last_(last){ } - -make_range *make_range::create(constant_int *first, constant_int *last) { - assert(first->get_type()->is_integer_ty()); - assert(first->get_type() == last->get_type()); -// assert(((constant_int*)first)->get_value() == 0); - type *ty = block_type::get(first->get_type(), {(unsigned)last->get_value() - (unsigned)first->get_value()}); - return new make_range(ty, first, last); -} - -const constant_int* make_range::get_first() const { - return first_; -} - -const constant_int* make_range::get_last() const { - return last_; -} - -} -} diff --git a/lib/ir/metadata.cc b/lib/ir/metadata.cc deleted file mode 100644 index 9d31963c229b..000000000000 --- a/lib/ir/metadata.cc +++ /dev/null @@ -1,14 +0,0 @@ -#include "triton/ir/metadata.h" - -namespace triton{ -namespace ir{ - -metadata::metadata(kind_t kind, std::vector value) - : kind_(kind), value_(value) { } - -metadata* 
metadata::get(kind_t kind, std::vector value) { - return new metadata(kind, value); -} - -} -} diff --git a/lib/ir/module.cc b/lib/ir/module.cc deleted file mode 100644 index d86b60085532..000000000000 --- a/lib/ir/module.cc +++ /dev/null @@ -1,27 +0,0 @@ -#include -#include -#include "triton/ir/basic_block.h" -#include "triton/ir/module.h" -#include "triton/ir/type.h" -#include "triton/ir/constant.h" -#include "triton/ir/function.h" - -namespace triton{ -namespace ir{ - -void module::reset_ret_ty(const std::string& name, type* ty) { - get_function(name)->get_fn_type()->reset_ret_ty(ty); -} - -/* functions */ -function *module::get_or_insert_function(const std::string &name, function_type *ty) { - function *&fn = (function*&)symbols_[name]; - if(fn == nullptr){ - fn = function::create(ty, global_value::external, name, this); - } - return fn; -} - - -} -} diff --git a/lib/ir/print.cc b/lib/ir/print.cc deleted file mode 100644 index 4b6e3266f15d..000000000000 --- a/lib/ir/print.cc +++ /dev/null @@ -1,450 +0,0 @@ -#include -#include "triton/ir/basic_block.h" -#include "triton/ir/module.h" -#include "triton/ir/type.h" -#include "triton/ir/value.h" -#include "triton/ir/constant.h" -#include "triton/ir/function.h" -#include "triton/ir/instructions.h" -#include "triton/ir/print.h" - -#include -#include - -namespace triton{ -namespace ir{ - -namespace { -class SlotTracker { - // A mapping of values to slot numbers. - using value_map = std::map; - - // The module for which we are holding slot numbers. - const module *mod_; - bool module_processed = false; - - // The function for which we are holding slot numbers. - const function *func_ = nullptr; - bool function_processed = false; - - // m_map - The slot map for the module level data. - value_map m_map; - unsigned m_next = 0; - - // f_map - The slot map for the function level data. - value_map f_map; - unsigned f_next = 0; - -public: - // Construct from a module - explicit SlotTracker(const module *mod) : mod_(mod) {} - - // Construct from a function - explicit SlotTracker(const function *f) - : mod_(f? f->get_parent() : nullptr), func_(f) {} - - // Return the slot number of the specified value. If something is not in - // the SlotTracker, return -1 - int get_local_slot(const value *v); - - void initialize_if_needed(); - - // If you'd like to deal with a function instead of just a module, use - // this method to get its data into the SlotTracker - void incorporate_function(const function *f) { - func_ = f; - function_processed = false; - } - -private: - // Add all of the module level global variables (and their initializers) - // and function declarations, but not contents of those functions. - void process_module(); - - // Add all of the functions arguments, basic blocks, and instructions. 
- void process_function(); - - // Insert specified value* into the slot table - void create_function_slot(const value *v); -}; - -class AssemblyWriter { - std::ostream &os; - SlotTracker &slot_tracker; - -public: - AssemblyWriter(std::ostream &os, SlotTracker &slot_tracker) - : os(os), slot_tracker(slot_tracker) {} - - void print_module(const module *mod); - void print_function(const function *f); - void print_argument(const argument *arg); - void print_basic_block(const basic_block *bb); - void print_instruction(const instruction *instr); - void print_value(const value *v); - - void write_operand(const value *op, bool print_type = false); -}; -} // anonymous namespace - -//------------------------- -// SlotTracker -//------------------------- -void SlotTracker::process_module() { - // Nothing to do at the moment. - // Create slots for global variable & unnamed functions & ... - module_processed = true; -} - -void SlotTracker::process_function() { - f_next = 0; - - // Add all the function arguments with no names. - for (const argument *arg : func_->args()) - if (!arg->has_name()) - create_function_slot(arg); - - // Add all of the basic blocks and instructions with no names. - for (const basic_block *bb : func_->blocks()) { - if (!bb->has_name()) - create_function_slot(bb); - - for (const instruction *instr : bb->get_inst_list()) { - if (!instr->get_type()->is_void_ty() && !instr->has_name()) - create_function_slot(instr); - } - } - - function_processed = true; -} - -void SlotTracker::create_function_slot(const value *v) { - assert(!v->get_type()->is_void_ty() && !v->has_name() && "Doesn't need a slot"); - - unsigned dst_slot = f_next++; - f_map[v] = dst_slot; -} - -int SlotTracker::get_local_slot(const value *v) { - assert(dynamic_cast(v) == nullptr && "Can't get a constant slot"); - - // Check for uninitialized state and do lazy initialization. - initialize_if_needed(); - - value_map::iterator f_iter = f_map.find(v); - return f_iter == f_map.end() ? -1 : (int)f_iter->second; -} - -void SlotTracker::initialize_if_needed() { - if (mod_ && !module_processed) - process_module(); - - if (func_ && !function_processed) - process_function(); -} - - -//------------------------------- -// AssemblyWriter -//------------------------------- -void AssemblyWriter::write_operand(const value *operand, bool print_type) { - if (!operand) { - os << ""; - return; - } - - if (auto *c = dynamic_cast(operand)) { - os << c->repr(); - return; - } - - if (operand->has_name()) { - os << operand->get_name(); - return; - } - - // Print the normal way - int slot_num = slot_tracker.get_local_slot(operand); - - if (slot_num != -1) - os << "%" << slot_num; - else - os << ""; -} - -void AssemblyWriter::print_module(const module *mod) { - slot_tracker.initialize_if_needed(); - // ;ModuleID = ... - // source_filename = ... - - // Print all of the functions. - for (function *f : mod->get_function_list()) { - os << "\n"; - print_function(f); - } -} - -void AssemblyWriter::print_function(const function *f) { - // Annotation & Attributes - - slot_tracker.incorporate_function(f); - - os << "def "; - ir::type *rt_type = f->get_fn_type()->get_return_ty(); - // Functions must have names. 
- os << rt_type->repr() << " " << f->get_name() << "("; - // Print arguments - for (ir::argument *arg : f->args()) { - if (arg->get_arg_no() > 0) - os << ", "; - print_argument(arg); - } - os << ")"; - - // Print function body - os << "{"; - for (const basic_block *bb : f->blocks()) - print_basic_block(bb); - os << "}\n"; -} - -void AssemblyWriter::print_argument(const argument *arg) { - // Print type - os << arg->get_type()->repr(); - - // Print name, if available. - if (arg->has_name()) - os << " " << arg->get_name(); - else { - int slot_num = slot_tracker.get_local_slot(arg); - assert(slot_num != -1 && "expect argument in function here"); - os << " %" << slot_num; - } - - // Print attributes - std::set attrs = arg->get_parent()->get_attributes(arg); - for (attribute attr : attrs) - os << " " << attr.repr(); -} - -void AssemblyWriter::print_basic_block(const basic_block *bb) { - // bb label - if (bb->has_name()) { - os << "\n"; - os << bb->get_name() << ":"; - } else { - os << "\n"; - int slot_num = slot_tracker.get_local_slot(bb); - if (slot_num != -1) - os << slot_num << ":"; - else - os << ":"; - } - - // Print predecessors for the block - auto const &predecessors = bb->get_predecessors(); - if (!predecessors.empty()) { - os << std::setw(50) << std::setfill(' ') - << "; preds = "; - for (size_t i=0; iget_inst_list()) - print_instruction(instr); -} - -void AssemblyWriter::print_instruction(const instruction *instr) { - // Print out indentation for an instruction. - os << " "; - - ir::type *type = instr->get_type(); - if (instr->has_name()) { - os << instr->get_name(); - os << " = "; - } else if (!type->is_void_ty()) { - // Print out the def slot taken. - int slot_num = slot_tracker.get_local_slot(instr); - if (slot_num == -1) - os << " = "; - else - os << "%" << slot_num << " = "; - } - - // Print out opcode - os << instr->repr() << " " << type->repr(); - - size_t num_ops = instr->get_num_operands(); - if (num_ops > 0) - os << " "; - ir::instruction::ops_t ops = instr->ops(); - for (unsigned i = 0; i < num_ops; ++i) { - if (i) - os << ", "; - write_operand(ops[i]); - } - - os << ";\n"; -} - -void AssemblyWriter::print_value(const value *v) { - // Not implemented -} - - -//------------------------------- -// External interface -//------------------------------- -void module::print(std::ostream &os) { - SlotTracker slot_tracker(this); - AssemblyWriter writer(os, slot_tracker); - writer.print_module(this); -} - -void function::print(std::ostream &os) { - SlotTracker slot_tracker(this); - AssemblyWriter writer(os, slot_tracker); - writer.print_function(this); -} - -void basic_block::print(std::ostream &os) { - SlotTracker slot_tracker(this->get_parent()); - AssemblyWriter writer(os, slot_tracker); - writer.print_basic_block(this); -} - -void instruction::print(std::ostream &os) { - SlotTracker slot_tracker(this->get_parent()->get_parent()); - AssemblyWriter writer(os, slot_tracker); - writer.print_instruction(this); -} - -//------------------------------- -// legacy print interface -//------------------------------- -std::string get_name(ir::value *v, unsigned i) { - if(v->get_name().empty()){ - std::string name = "%" + std::to_string(i); - v->set_name(name); - } - return v->get_name(); -} - - -void print(module &mod, std::ostream& os) { - unsigned cnt = 0; - for(ir::function *fn: mod.get_function_list()){ - os << "def " << fn->get_fn_type()->get_return_ty()->repr() << " " << fn->get_name() << "(" ; - for(ir::argument* arg: fn->args()) { - if(arg->get_arg_no() > 0) - os << ", "; - os << 
arg->get_type()->repr() << " " << arg->get_name(); - auto attrs = fn->get_attributes(arg); - if(attrs.size() > 0) - os << " "; - for(ir::attribute attr: attrs) - os << attr.repr() << " "; - } - os << ")" << std::endl; - os << "{" << std::endl; - for(ir::basic_block *block: fn->blocks()){ - auto const &predecessors = block->get_predecessors(); - os << block->get_name() << ":"; - if(!predecessors.empty()){ - os << " "; - os << "; preds = "; - auto const &predecessors = block->get_predecessors(); - for(ir::basic_block *pred: predecessors) - os << pred->get_name() << (pred!=predecessors.back()?", ":""); - } - os << std::endl; - for(ir::instruction *inst: block->get_inst_list()){ - os << " "; - if(!inst->get_type()->is_void_ty()){ - os << get_name(inst, cnt++); - os << " = "; - } - ir::type* type = inst->get_type(); - os << inst->repr() << " " << type->repr(); - ir::instruction::ops_t ops = inst->ops(); - size_t num_ops = inst->get_num_operands(); - if(num_ops > 0) - os << " ";; - for(unsigned i = 0; i < num_ops; i++){ - if(auto *x = dynamic_cast(ops[i])) - os << x->repr(); - else - os << get_name(ops[i], cnt++); - os << (i < num_ops - 1?", ":""); - } - os << ";"; -// os << " ("; -// for(ir::user* usr: inst->get_users()) -// os << get_name(usr, cnt++) << ", " ; -// os << " )"; - os << std::endl; - } - } - os << "}" << std::endl; - } -} - -void print(function &fn, std::ostream &os) { - // -} - -void print(basic_block &bb, std::ostream &os) { - auto const &predecessors = bb.get_predecessors(); - os << bb.get_name() << ":"; - if(!predecessors.empty()){ - os << " "; - os << "; preds = "; - auto const &predecessors = bb.get_predecessors(); - for(ir::basic_block *pred: predecessors) - os << pred->get_name() << (pred!=predecessors.back()?", ":""); - } - os << std::endl; - for(ir::instruction *inst: bb.get_inst_list()){ - print(*inst, os); - } -} - -void print(instruction &instr, std::ostream &os) { - instruction *inst = &instr; - os << " "; - if(!inst->get_type()->is_void_ty()){ - os << instr.get_name(); - os << " = "; - } - ir::type* type = inst->get_type(); - os << inst->repr() << " " << type->repr(); - ir::instruction::ops_t ops = inst->ops(); - size_t num_ops = inst->get_num_operands(); - if(num_ops > 0) - os << " ";; - for(unsigned i = 0; i < num_ops; i++){ - if(auto *x = dynamic_cast(ops[i])) - os << x->repr(); - else - os << ops[i]->get_name(); - os << (i < num_ops - 1?", ":""); - } - os << ";"; -// os << " ("; -// for(ir::user* usr: inst->get_users()) -// os << get_name(usr, cnt++) << ", " ; -// os << " )"; - os << std::endl; -} - - -} -} diff --git a/lib/ir/type.cc b/lib/ir/type.cc deleted file mode 100644 index 5667dcac54bb..000000000000 --- a/lib/ir/type.cc +++ /dev/null @@ -1,252 +0,0 @@ -#include -#include -#include "triton/ir/type.h" -#include "triton/ir/context.h" -#include "triton/ir/context_impl.h" -#include "triton/ir/value.h" -#include "triton/ir/constant.h" - -namespace triton{ -namespace ir{ - -//===----------------------------------------------------------------------===// -// type class -//===----------------------------------------------------------------------===// - -// attributes -type *type::get_scalar_ty() const { - if(is_block_ty()) - return get_tile_element_ty(); - return const_cast(this); -} - -unsigned type::get_primitive_size_in_bits() const { - switch (id_) { - case FP8TyID: return 8; - case FP16TyID: return 16; - case BF16TyID: return 16; - case FP32TyID: return 32; - case FP64TyID: return 64; - case IntegerTyID: return std::max(8, 
((integer_type*)(this))->get_bitwidth()); - case BlockTyID: return ((block_type*)(this))->get_bitwidth(); - default: return 0; - } -} - -unsigned type::get_integer_bitwidth() const -{ assert(id_ == IntegerTyID); return ((integer_type*)(this))->get_bitwidth(); } - -unsigned type::get_tile_bitwidth() const -{ return ((block_type*)(this))->get_bitwidth(); } - -unsigned type::get_fp_mantissa_width() const { - id_t id = get_scalar_ty()->id_; - assert(is_floating_point_ty() && "Not a floating point type!"); - if (id == FP8TyID) return 3; - if (id == FP16TyID) return 10; - if (id == BF16TyID) return 7; - if (id == FP32TyID) return 23; - if (id == FP64TyID) return 53; - throw std::runtime_error("unreachable"); -} - -type* type::get_tile_element_ty() const { - assert(is_block_ty()); - return contained_tys_[0]; -} - -unsigned type::get_pointer_address_space() const { - assert(is_pointer_ty()); - return ((pointer_type*)this)->get_address_space(); -} - -type * type::get_pointer_element_ty() const { - type *ptr_ty = get_scalar_ty(); - assert(ptr_ty->is_pointer_ty()); - type *scalar_ty = ((pointer_type*)ptr_ty)->get_element_ty(); - if(is_block_ty()) - return block_type::get_same_shapes(scalar_ty, (type*)this); - return scalar_ty; -} - - -type::block_shapes_t type::get_block_shapes() const { - assert(is_block_ty()); - return ((block_type*)this)->get_shapes(); -} - -const size_t type::get_tile_rank() const { - return get_block_shapes().size(); -} - -const size_t type::get_tile_ranks1() const { - int ret = 0; - for(int s: get_block_shapes()) - ret += s > 1; - return ret; -} - - -unsigned type::get_tile_num_elements() const { - const block_shapes_t& shapes = get_block_shapes(); - unsigned result = 1; - for(auto shape: shapes) - result *= shape; - return result; -} - - -// composite predicates -bool type::is_int_or_tileint_ty() -{ return get_scalar_ty()->is_integer_ty(); } - -bool type::is_integer_ty(unsigned width) const -{ return is_integer_ty() && get_integer_bitwidth()== width; } - - -bool type::is_floating_point_ty() const -{ return is_fp8_ty() || is_fp16_ty() || is_bf16_ty() || is_fp32_ty() || is_fp64_ty(); } - -bool type::is_sized() const { - // primitive types are sized - if(is_integer_ty() || is_floating_point_ty() || - is_pointer_ty()){ - return true; - } - // tile types are sizes - if(is_block_ty()) - return get_scalar_ty()->is_sized(); - return false; -} - -// primitive types -type *type::get_void_ty(context &ctx) { return &ctx.p_impl->void_ty; } -type *type::get_label_ty(context &ctx) { return &ctx.p_impl->label_ty; } -// floating point -type *type::get_fp8_ty(context &ctx) { return &ctx.p_impl->fp8_ty; } -type *type::get_fp16_ty(context &ctx) { return &ctx.p_impl->fp16_ty; } -type *type::get_bf16_ty(context &ctx) { return &ctx.p_impl->bf16_ty; } -type *type::get_fp32_ty(context &ctx) { return &ctx.p_impl->fp32_ty; } -type *type::get_fp64_ty(context &ctx) { return &ctx.p_impl->fp64_ty; } -// integer types -integer_type *type::get_int1_ty(context &ctx) { return &ctx.p_impl->int1_ty; } -integer_type *type::get_int8_ty(context &ctx) { return &ctx.p_impl->int8_ty; } -integer_type *type::get_int16_ty(context &ctx) { return &ctx.p_impl->int16_ty; } -integer_type *type::get_int32_ty(context &ctx) { return &ctx.p_impl->int32_ty; } -integer_type *type::get_int64_ty(context &ctx) { return &ctx.p_impl->int64_ty; } -integer_type *type::get_int128_ty(context &ctx) { return &ctx.p_impl->int128_ty; } - - - -pointer_type::pointer_type(type *ty, unsigned address_space) - : type(ty->get_context(), PointerTyID), 
address_space_(address_space){ - contained_tys_.push_back(ty); -} - -bool pointer_type::is_valid_elt_ty(type *ty){ - return !ty->is_void_ty() && !ty->is_label_ty() && - !ty->is_metadata_ty() && !ty->is_token_ty(); -} - -pointer_type* pointer_type::get(type *elt_ty, unsigned address_space){ - assert(elt_ty && "Can't get a pointer to type!"); - assert(is_valid_elt_ty(elt_ty) && "Invalid type for pointer element!"); - // look-up - context_impl *impl = elt_ty->get_context().p_impl.get(); - std::unique_ptr &entry = impl->ptr_tys[std::make_pair(elt_ty, address_space)]; - if(!entry) - entry.reset(new pointer_type(elt_ty, address_space)); - return entry.get(); -} - -//===----------------------------------------------------------------------===// -// composite_type class -//===----------------------------------------------------------------------===// - -type* composite_type::get_type_at_index(value *) const{ - assert(is_block_ty()); - return get_scalar_ty(); -} - -bool composite_type::index_valid(value *idx) const{ - assert(is_block_ty()); - return idx->get_type()->is_int_or_tileint_ty(); -} - -//===----------------------------------------------------------------------===// -// struct_type class -//===----------------------------------------------------------------------===// - -struct_type::struct_type(const contained_tys_vec_t& tys, bool is_packed) - : composite_type(tys[0]->get_context(), StructTyID), is_packed_(is_packed) { - contained_tys_ = tys; -} - -struct_type* struct_type::get(const contained_tys_vec_t& tys, bool is_packed) { - assert(tys.size()); - context_impl* impl = tys[0]->get_context().p_impl.get(); - struct_type *& entry = impl->struct_tys[tys]; - if(!entry) - entry = new struct_type(tys, is_packed); - return entry; -} - - -//===----------------------------------------------------------------------===// -// block_type class -//===----------------------------------------------------------------------===// - -block_type::block_type(type *ty, const block_shapes_t &shapes) - : composite_type(ty->get_context(), BlockTyID), shapes_(shapes) { - contained_tys_.push_back(ty); -} - -bool block_type::is_valid_elt_ty(type *ty) { - return ty->is_pointer_ty() || ty->is_floating_point_ty() || ty->is_integer_ty(); -} - -unsigned block_type::get_num_elements() const { - unsigned res = 1; - for(auto shape: shapes_) - res *= shape; - return res; -} - -unsigned block_type::get_bitwidth() const { - return get_num_elements() * get_tile_element_ty()->get_primitive_size_in_bits(); -} - -block_type* block_type::get(type *elt_ty, const block_shapes_t &shapes) { - assert(elt_ty && "Can't get a tile of type!"); - assert(shapes.size() && "Can't create a tile with empty shapes!"); - assert(is_valid_elt_ty(elt_ty) && "Invalid type for tile element!"); - // look-up - context_impl *impl = elt_ty->get_context().p_impl.get(); - std::unique_ptr &entry = impl->block_tys[std::make_pair(elt_ty, shapes)]; - if(!entry) - entry.reset(new block_type(elt_ty, shapes)); - return entry.get(); -} - -block_type* block_type::get_same_shapes(type *ty, type *ref){ - assert(ref->is_block_ty()); - return get(ty, ref->get_block_shapes()); -} - -//===----------------------------------------------------------------------===// -// function_type class -//===----------------------------------------------------------------------===// - -function_type::function_type(type *ret_ty, const std::vector ¶m_tys): - type(ret_ty->get_context(), FunctionTyID) { - contained_tys_.push_back(ret_ty); - for(type *ty: param_tys) - 
contained_tys_.push_back(ty); -} - -function_type* function_type::get(type *ret_ty, const std::vector ¶m_tys) { - return new function_type(ret_ty, param_tys); -} - -} -} diff --git a/lib/ir/utils.cc b/lib/ir/utils.cc deleted file mode 100644 index 9abaef5c08ab..000000000000 --- a/lib/ir/utils.cc +++ /dev/null @@ -1,77 +0,0 @@ -#include -#include -#include "triton/ir/utils.h" -#include "triton/ir/basic_block.h" -#include "triton/ir/function.h" -#include "triton/ir/module.h" - -namespace triton{ -namespace ir{ - -std::vector cfg::post_order(function* fn) { - std::stack stack; - std::set visited; - std::vector result; - // initialize stack - for(ir::basic_block* block: fn->blocks()) - if(block->get_predecessors().empty()){ - stack.push(block); - visited.insert(block); - } - // DFS - while(!stack.empty()) { - basic_block* current = stack.top(); - bool tail = true; - for(basic_block* succ: current->get_successors()) - if(visited.find(succ) == visited.end()){ - stack.push(succ); - visited.insert(succ); - tail = false; - break; - } - if(tail){ - stack.pop(); - result.push_back(current); - } - } - return result; -} - -std::vector cfg::reverse_post_order(function* fn) { - auto result = post_order(fn); - std::reverse(result.begin(), result.end()); - return result; -} - -void for_each_instruction_backward(module &mod, const std::function &do_work) { - for(ir::function *fn: mod.get_function_list()) - for(ir::basic_block *block: cfg::post_order(fn)){ - auto inst_list = block->get_inst_list(); - for(auto it = inst_list.rbegin(); it != inst_list.rend() ; it++) - do_work(*it); - } -} - -void for_each_instruction(module &mod, const std::function &do_work) { - for(ir::function *fn: mod.get_function_list()) - for(ir::basic_block *block: cfg::reverse_post_order(fn)) - for(ir::instruction *i: block->get_inst_list()) - do_work(i); -} - -void for_each_value(module &mod, const std::function &do_work) { - std::set seen; - for(ir::function *fn: mod.get_function_list()) - for(ir::basic_block *block: cfg::reverse_post_order(fn)) - for(ir::instruction *i: block->get_inst_list()){ - for(ir::value *op: i->ops()){ - if(seen.insert(op).second) - do_work(op); - } - if(seen.insert(i).second) - do_work(i); - } -} - -} -} diff --git a/lib/ir/value.cc b/lib/ir/value.cc deleted file mode 100644 index 251d644796cc..000000000000 --- a/lib/ir/value.cc +++ /dev/null @@ -1,82 +0,0 @@ -#include -#include -#include -#include "triton/ir/value.h" -#include "triton/ir/instructions.h" - -namespace triton{ -namespace ir{ - -class type; - -//===----------------------------------------------------------------------===// -// value class -//===----------------------------------------------------------------------===// - -value::value(type *ty, const std::string &name): ty_(ty){ - set_name(name); -} - -void value::add_use(user *arg) { - users_.push_back(arg); -} - -value::users_t::iterator value::erase_use(user *arg){ - auto it = std::find(users_.begin(), users_.end(), arg); - if(it == users_.end()) - return it; - return users_.erase(it); -} - -// TODO: automatic naming scheme + update symbol table -void value::set_name(const std::string &name){ - name_ = name; -} - -void value::replace_all_uses_with(value *target){ - for (auto it = users_.begin(); it != users_.end(); ) { - it = (*it)->replace_uses_of_with(this, target); - } -} - - -void visitor::visit_value(ir::value* v) { - v->accept(this); -} - - -//===----------------------------------------------------------------------===// -// user class 
-//===----------------------------------------------------------------------===// -void user::set_operand(unsigned i, value *x) { - assert(i < ops_.size() && "set_operand() out of range!"); - ops_[i] = x; - x->add_use(this); -} - -value* user::get_operand(unsigned i) const { - assert(i < ops_.size() && "get_operand() out of range!"); - return ops_[i]; -} - -unsigned user::get_num_operands() const { - return num_ops_; -} - -unsigned user::get_num_hidden() const { - return num_hidden_; -} - -value::users_t::iterator user::replace_uses_of_with(value *before, value *after) { - for(size_t i = 0; i < ops_.size(); i++) - if(ops_[i] == before){ - ops_[i] = after; - after->add_use(this); - } - return before->erase_use(this); -} - - - -} -} diff --git a/python/bench/README.md b/python/bench/README.md deleted file mode 100644 index 970c3a2a0706..000000000000 --- a/python/bench/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# Run the benchmarks - -Install the required dependencies via `pip install -r requirements-bench.txt` from the triton/python/bench folder. - -Run the benchmarks through `python3 bench/run.py`, this will produce an HTML report in a results folder. diff --git a/python/bench/bench_blocksparse.py b/python/bench/bench_blocksparse.py deleted file mode 100644 index 5deb06edbd27..000000000000 --- a/python/bench/bench_blocksparse.py +++ /dev/null @@ -1,92 +0,0 @@ -import torch - -import triton - -# ------------------------------- -# Matrix Multiplication -# ------------------------------- - -nt = {False: 'n', True: 't'} -square_confs = [ - triton.testing.Benchmark( - x_names=['M', 'N', 'K'], - x_vals=[128, 256, 512, 1024, 2048, 3072, 4096, 6144], - line_arg='block', - line_vals=[16, 32, 64, 128], - line_names=['Block16', 'Block32', 'Block64', 'Block128'], - ylabel='TFLOPS', - plot_name=f'{op_mode}-{layout_mode}-square-{nt[AT]}{nt[BT]}', - args={'layout_mode': layout_mode, 'op_mode': op_mode, - 'AT': AT, 'BT': BT, 'dtype': torch.float16, 'provider': 'triton'} - ) - for AT in [False] for BT in [False] - for op_mode in ['dsd'] for layout_mode in ['dense'] -] - - -@triton.testing.perf_report(square_confs) -def bench_matmul(M, N, K, block, layout_mode, op_mode, AT, BT, dtype, provider, warmup=100, rep=1000): - Z, H = 1, 1 - make_layout = { - 'tril': lambda H, M, N: torch.tril(torch.ones((H, M, N), dtype=torch.int64)), - 'dense': lambda H, M, N: torch.ones(H, M, N, dtype=torch.int64), - }[layout_mode] - # create layout - shape = {'sdd': (M, N), 'dsd': (K, M) if AT else (M, K), 'dds': (N, K) if BT else (K, N)}[op_mode] - layout = make_layout(H, shape[0] // block, shape[1] // block) - # creat inputs - a = torch.randn((Z, H, K, M) if AT else (Z, H, M, K), dtype=dtype, device='cuda') - b = torch.randn((Z, H, N, K) if BT else (Z, H, K, N), dtype=dtype, device='cuda') - # create op - tflops = lambda ms: num_flops / ms * 1e3 - if provider == 'triton': - op = triton.ops.blocksparse.matmul(layout, block, op_mode, device="cuda", trans_a=AT, trans_b=BT) - # inputs - a = triton.testing.sparsify_tensor(a, layout, block) if op_mode == 'dsd' else a - b = triton.testing.sparsify_tensor(b, layout, block) if op_mode == 'dds' else b - mean_ms, min_ms, max_ms = triton.testing.do_bench(lambda: op(a, b), warmup=warmup, rep=rep) - num_flops = { - 'sdd': 2 * Z * K * float(layout.sum()) * block * block, - 'dsd': 2 * Z * N * float(layout.sum()) * block * block, - 'dds': 2 * Z * M * float(layout.sum()) * block * block - }[op_mode] * 1e-12 - return tflops(mean_ms), tflops(min_ms), tflops(max_ms) - - -# 
------------------------------- -# Softmax -# ------------------------------- - -square_confs = [ - triton.testing.Benchmark( - x_names=['M', 'N'], - x_vals=[128, 256, 512, 1024, 2048, 3072, 4096, 6144], - line_arg='block', - line_vals=[16, 32, 64], - line_names=['Block16', 'Block32', 'Block64'], - ylabel='GBPS', - plot_name=f'{layout_mode}-square', - args={'layout_mode': layout_mode, 'dtype': torch.float16, 'provider': 'triton'} - ) - for layout_mode in ['dense', 'tril'] -] - - -@triton.testing.perf_report(square_confs) -def bench_softmax(M, N, block, layout_mode, dtype, provider, warmup=10, rep=50): - Z, H = 1, 1 - make_layout = { - 'tril': lambda H, M, N: torch.tril(torch.ones((H, M, N), dtype=torch.int64)), - 'dense': lambda H, M, N: torch.ones(H, M, N, dtype=torch.int64), - }[layout_mode] - layout = make_layout(H, M // block, N // block) - a = torch.randn((Z, H, M, N), dtype=dtype, device='cuda') - if provider == 'triton': - a = triton.testing.sparsify_tensor(a, layout, block) - op = triton.ops.blocksparse.softmax(layout, block, device="cuda") - gbps = lambda ms: (2 * a.numel() * a.element_size() * 1e-9) / (ms * 1e-3) - mean_ms, min_ms, max_ms = triton.testing.do_bench(lambda: op(a), warmup=warmup, rep=rep) - return gbps(mean_ms), gbps(min_ms), gbps(max_ms) - - -bench_matmul.run(print_data=True, show_plots=True) diff --git a/python/bench/bench_cross_entropy.py b/python/bench/bench_cross_entropy.py deleted file mode 100644 index aaa0e28f5423..000000000000 --- a/python/bench/bench_cross_entropy.py +++ /dev/null @@ -1,41 +0,0 @@ -import torch - -import triton - -confs = [ - triton.testing.Benchmark( - x_names=['N'], - x_vals=[128, 256, 512, 1024, 2048, 3072, 4096, 6144, 8192], - line_arg='provider', - line_vals=['triton', 'torch'], - line_names=['Triton', 'Torch'], - ylabel='GBPS', - plot_name=f'{mode}-2048', - args={'M': 2048, 'dtype': torch.float16, 'mode': mode} - ) - for mode in ['forward', 'backward'] -] - - -@triton.testing.perf_report(confs) -def bench_op(M, N, dtype, mode, provider): - # create inputs - x = torch.randn(M, N, dtype=dtype, device='cuda', requires_grad=True) - idx = 4 + torch.ones(M, dtype=torch.int64, device='cuda') - num_gb = (2 * x.numel() * x.element_size() * 1e-9) - gbps = lambda ms: num_gb / ms * 1e3 - # forward pass - op = {'torch': torch.nn.CrossEntropyLoss(reduction='none'), - 'triton': triton.ops.cross_entropy}[provider] - if mode == 'forward': - mean_ms, min_ms, max_ms = triton.testing.do_bench(lambda: op(x, idx)) - if mode == 'backward': - y = op(x, idx) - dy = torch.randn_like(y) - fn = lambda: y.backward(dy, retain_graph=True) - mean_ms, min_ms, max_ms = triton.testing.do_bench(fn, grad_to_none=[x]) - return gbps(mean_ms), gbps(min_ms), gbps(max_ms) - - -if __name__ == '__main__': - bench_op.run(print_data=True) diff --git a/python/bench/bench_matmul.py b/python/bench/bench_matmul.py deleted file mode 100644 index 30864f39114e..000000000000 --- a/python/bench/bench_matmul.py +++ /dev/null @@ -1,67 +0,0 @@ -import torch - -import triton - - -def rounded_linspace(low, high, steps, div): - ret = torch.linspace(low, high, steps) - ret = torch.div(ret.int() + div - 1, div, rounding_mode='trunc') * div - ret = torch.unique(ret) - return list(map(int, ret)) - - -# Square benchmarks -nt = {False: "n", True: "t"} -square_confs = [ - triton.testing.Benchmark( - x_names=["M", "N", "K"], - x_vals=rounded_linspace(512, 8192, 32, 128), - line_arg="provider", - line_vals=["cublas", "triton", "cutlass"], - line_names=["cuBLAS", "Triton", "CUTLASS"], - ylabel="TFLOPS", 
- plot_name=f"matmul-square-{nt[AT]}{nt[BT]}", - args={"AT": AT, "BT": BT, "dtype": torch.float16}, - ) for AT in [False] for BT in [False] -] - -# Transformer training benchmarks -transformer_confs = [ - triton.testing.Benchmark( - x_names=[x], - x_vals=rounded_linspace(NK // 16, NK, 32, 128), - line_arg="provider", - line_vals=["cublas", "triton", "cutlass"], - line_names=["cuBLAS", "Triton", "CUTLASS"], - ylabel="TFLOPS", - plot_name=f"matmul-M{M}-{'NK'.replace(x, '')}{NK}", - args={"M": M, 'NK'.replace(x, ''): NK, "AT": False, "BT": False, "dtype": torch.float16} - ) for NK in [12288] - for i, x in enumerate(["N", "K"]) - for M in [2048] -] - - -@triton.testing.perf_report(square_confs) -def bench_op(M, N, K, AT, BT, dtype, provider, warmup=25, rep=75): - a = torch.rand((K, M) if AT else (M, K), device="cuda", dtype=dtype) - b = torch.rand((N, K) if BT else (K, N), device="cuda", dtype=dtype) - if AT: - a = a.t() - if BT: - b = b.t() - tflops = lambda ms: 2. * M * N * K / ms * 1e-9 - if provider == "cublas": - ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.matmul(a, b), warmup=warmup, rep=rep) - return tflops(ms), tflops(max_ms), tflops(min_ms) - if provider == "triton": - ms, min_ms, max_ms = triton.testing.do_bench(lambda: triton.ops.matmul(a, b), warmup=warmup, rep=rep) - return tflops(ms), tflops(max_ms), tflops(min_ms) - if provider == "cutlass": - cutlass_matmul = triton.testing.cutlass_matmul - try: - ms, min_ms, max_ms = triton.testing.do_bench(lambda: cutlass_matmul(a, b), warmup=warmup, rep=rep) - return tflops(ms), tflops(max_ms), tflops(min_ms) - except Exception: - return None - return None diff --git a/python/bench/requirements-bench.txt b/python/bench/requirements-bench.txt deleted file mode 100644 index eb69de862c9a..000000000000 --- a/python/bench/requirements-bench.txt +++ /dev/null @@ -1,2 +0,0 @@ -pandas >= 1.3.3 -matplotlib >= 3.4.3 \ No newline at end of file diff --git a/python/bench/run.py b/python/bench/run.py deleted file mode 100644 index 5e6e3b392012..000000000000 --- a/python/bench/run.py +++ /dev/null @@ -1,44 +0,0 @@ -import argparse -import inspect -import os -import sys - -import triton - - -def run_all(result_dir, names): - if not os.path.exists(result_dir): - os.makedirs(result_dir) - for mod in os.listdir(os.path.dirname(os.path.realpath(__file__))): - # skip non python files - if not mod.endswith('.py'): - continue - # skip file not in provided names - if names and names not in mod: - continue - # skip files that don't start with 'bench_' - if not mod.startswith('bench_'): - continue - print(f'running {mod}...') - mod = __import__(os.path.splitext(mod)[0]) - benchmarks = inspect.getmembers(mod, lambda x: isinstance(x, triton.testing.Mark)) - for name, bench in benchmarks: - curr_dir = os.path.join(result_dir, mod.__name__.replace('bench_', '')) - if len(benchmarks) > 1: - curr_dir = os.path.join(curr_dir, name.replace('bench_', '')) - if not os.path.exists(curr_dir): - os.makedirs(curr_dir) - bench.run(save_path=curr_dir) - - -def main(args): - parser = argparse.ArgumentParser(description="Run the benchmark suite.") - parser.add_argument("-r", "--result-dir", type=str, default='results', required=False) - parser.add_argument("-n", "--names", type=str, default='', required=False) - parser.set_defaults(feature=False) - args = parser.parse_args(args) - run_all(args.result_dir, args.names) - - -if __name__ == '__main__': - main(sys.argv[1:]) diff --git a/python/examples/copy_strided.py b/python/examples/copy_strided.py new file mode 100644 
index 000000000000..922c5ba5ce3f --- /dev/null +++ b/python/examples/copy_strided.py @@ -0,0 +1,19 @@ + +import triton +import triton.language as tl + + +# triton kernel +@triton.jit +def kernel(X, stride_xm, + Z, stride_zn, + BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr): + off_m = tl.arange(0, BLOCK_M) + off_n = tl.arange(0, BLOCK_N) + Xs = X + off_m[:, None] * stride_xm + off_n[None, :] * 1 + Zs = Z + off_m[:, None] * 1 + off_n[None, :] * stride_zn + tl.store(Zs, tl.load(Xs)) + + +ret = triton.compile(kernel, "*fp32,i32,*fp32,i32", constants={"BLOCK_M": 64, "BLOCK_N": 64}, output="ttgir") +print(ret) diff --git a/python/examples/empty.py b/python/examples/empty.py new file mode 100644 index 000000000000..df313fb85869 --- /dev/null +++ b/python/examples/empty.py @@ -0,0 +1,13 @@ +import torch + +import triton +import triton.language as tl + + +@triton.jit +def kernel(X, stride_xm, stride_xn, BLOCK: tl.constexpr): + pass + + +X = torch.randn(1, device="cuda") +pgm = kernel[(1,)](X, 1, 1, BLOCK=1024) diff --git a/python/setup.py b/python/setup.py index e2fdbdcfbf36..3adcc6db80d8 100644 --- a/python/setup.py +++ b/python/setup.py @@ -1,5 +1,4 @@ import distutils -import distutils.spawn import os import platform import re @@ -25,42 +24,54 @@ def get_build_type(): return "Debug" elif check_env_flag("REL_WITH_DEB_INFO"): return "RelWithDebInfo" + elif check_env_flag("TRITON_REL_BUILD_WITH_ASSERTS"): + return "TritonRelBuildWithAsserts" else: - return "Release" + # TODO: change to release when stable enough + return "TritonRelBuildWithAsserts" -def use_system_llvm(): - if platform.system() == "Windows": - return True - versions = ['-11.0', '-11', '-11-64'] - supported = ['llvm-config{v}'.format(v=v) for v in versions] - paths = [distutils.spawn.find_executable(cfg) for cfg in supported] - return any(p is not None for p in paths) +# --- third party packages ----- +class Package(NamedTuple): + package: str + name: str + url: str + test_file: str + include_flag: str + lib_flag: str + syspath_var_name: str -def get_thirdparty_packages(triton_cache_path): - class Package(NamedTuple): - package: str - name: str - url: str - test_file: str - include_flag: str - lib_flag: str - - packages = [ - Package("pybind11", "pybind11-2.10.0", "https://github.com/pybind/pybind11/archive/refs/tags/v2.10.0.tar.gz", "include/pybind11/pybind11.h", "PYBIND11_INCLUDE_DIR", "") - ] - if not use_system_llvm(): - # download LLVM if no suitable system LLVM is installed - packages.append( - Package("llvm", "clang+llvm-11.0.1-x86_64-linux-gnu-ubuntu-16.04", "https://github.com/llvm/llvm-project/releases/download/llvmorg-11.0.1/clang+llvm-11.0.1-x86_64-linux-gnu-ubuntu-16.04.tar.xz", "lib", "LLVM_INCLUDE_DIRS", "LLVM_LIBRARY_DIR") - ) +def get_pybind11_package_info(): + name = "pybind11-2.10.0" + url = "https://github.com/pybind/pybind11/archive/refs/tags/v2.10.0.tar.gz" + return Package("pybind11", name, url, "include/pybind11/pybind11.h", "PYBIND11_INCLUDE_DIR", "", "PYBIND11_SYSPATH") + + +def get_llvm_package_info(): + # download if nothing is installed + system = platform.system() + system_suffix = {"Linux": "linux-gnu-ubuntu-18.04", "Darwin": "apple-darwin"}[system] + use_assert_enabled_llvm = check_env_flag("TRITON_USE_ASSERT_ENABLED_LLVM", "False") + if use_assert_enabled_llvm: + name = 'llvm+mlir-14.0.0-x86_64-{}-assert'.format(system_suffix) + url = "https://github.com/shintaro-iwasaki/llvm-releases/releases/download/llvm-14.0.0-329fda39c507/{}.tar.xz".format(name) + else: + name = 
'clang+llvm-14.0.0-x86_64-{}'.format(system_suffix) + url = "https://github.com/llvm/llvm-project/releases/download/llvmorg-14.0.0/{}.tar.xz".format(name) + return Package("llvm", name, url, "lib", "LLVM_INCLUDE_DIRS", "LLVM_LIBRARY_DIR", "LLVM_SYSPATH") + + +def get_thirdparty_packages(triton_cache_path): + packages = [get_pybind11_package_info(), get_llvm_package_info()] thirdparty_cmake_args = [] for p in packages: package_root_dir = os.path.join(triton_cache_path, p.package) package_dir = os.path.join(package_root_dir, p.name) test_file_path = os.path.join(package_dir, p.test_file) + if p.syspath_var_name in os.environ: + package_dir = os.environ[p.syspath_var_name] if not os.path.exists(test_file_path): try: shutil.rmtree(package_root_dir) @@ -77,6 +88,8 @@ class Package(NamedTuple): thirdparty_cmake_args.append("-D{}={}/lib".format(p.lib_flag, package_dir)) return thirdparty_cmake_args +# ---- cmake extension ---- + class CMakeExtension(Extension): def __init__(self, name, path, sourcedir=""): @@ -113,22 +126,27 @@ def run(self): self.build_extension(ext) def build_extension(self, ext): + lit_dir = shutil.which('lit') triton_cache_path = os.path.join(os.environ["HOME"], ".triton") + # lit is used by the test suite thirdparty_cmake_args = get_thirdparty_packages(triton_cache_path) extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.path))) # create build directories if not os.path.exists(self.build_temp): os.makedirs(self.build_temp) # python directories - python_include_dirs = [distutils.sysconfig.get_python_inc()] + python_include_dir = distutils.sysconfig.get_python_inc() cmake_args = [ + "-DLLVM_ENABLE_WERROR=ON", "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=" + extdir, - "-DBUILD_TUTORIALS=OFF", - "-DBUILD_PYTHON_MODULE=ON", + "-DTRITON_BUILD_TUTORIALS=OFF", + "-DTRITON_BUILD_PYTHON_MODULE=ON", # '-DPYTHON_EXECUTABLE=' + sys.executable, - # '-DCMAKE_VERBOSE_MAKEFILE:BOOL=ON', - "-DPYTHON_INCLUDE_DIRS=" + ";".join(python_include_dirs) + '-DCMAKE_VERBOSE_MAKEFILE:BOOL=ON', + "-DPYTHON_INCLUDE_DIRS=" + python_include_dir, + "-DLLVM_EXTERNAL_LIT=" + lit_dir ] + thirdparty_cmake_args + # configuration cfg = get_build_type() build_args = ["--config", cfg] @@ -155,16 +173,17 @@ def build_extension(self, ext): author_email="phil@openai.com", description="A language and compiler for custom Deep Learning operations", long_description="", - packages=["triton", "triton/_C", "triton/language", "triton/runtime", "triton/tools", "triton/ops", "triton/ops/blocksparse"], + packages=["triton", "triton/_C", "triton/language", "triton/tools", "triton/ops", "triton/runtime", "triton/ops/blocksparse"], install_requires=[ "cmake", "filelock", "torch", + "lit", ], package_data={ "triton/ops": ["*.c"], "triton/ops/blocksparse": ["*.c"], - "triton/language": ["*.bc"], + "triton/language": ["*.bc"] }, include_package_data=True, ext_modules=[CMakeExtension("triton", "triton/_C/")], @@ -180,6 +199,7 @@ def build_extension(self, ext): "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3.6", ], + test_suite="tests", extras_require={ "tests": [ "autopep8", diff --git a/python/src/cutlass.cc b/python/src/cutlass.cc deleted file mode 100644 index 14da81330b12..000000000000 --- a/python/src/cutlass.cc +++ /dev/null @@ -1,202 +0,0 @@ -#include "cutlass/library/handle.h" -#include "cutlass/library/library.h" -#include "cutlass/library/operation_table.h" -#include "cutlass/library/singleton.h" -#include "pybind11/pybind11.h" -#include "triton/tools/bench.hpp" - -using namespace cutlass; 
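A minimal sketch, not part of the patch, of how the PYBIND11_SYSPATH / LLVM_SYSPATH overrides introduced in setup.py above are intended to be consumed: when the variable is set, the cached download under the Triton cache directory is bypassed and the user-provided tree is handed to CMake instead. The helper name and the path below are illustrative assumptions.

# Mirrors the override logic added to get_thirdparty_packages() above.
import os

def resolve_package_dir(default_dir: str, syspath_var_name: str) -> str:
    # an environment variable, when present, wins over the cached download
    return os.environ.get(syspath_var_name, default_dir)

# hypothetical usage: point the build at a locally built LLVM 14 instead of
# downloading the release tarball
# os.environ["LLVM_SYSPATH"] = "/opt/llvm-14.0.0"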
-using namespace cutlass::library; - -std::map, const Operation *> op_cache_; - -static int const kHostWorkspaceSize = (4 << 10); -static int const kDeviceWorkspaceSize = (4 << 20); - -void run(int M, int N, int K, - int lda, int ldb, int ldc, int ldd, - void const *ptr_A, void const *ptr_B, void const *ptr_C, void *ptr_D, - void const *alpha, void const *beta, - ScalarPointerMode scalar_mode, - const Operation *operation, - cudaStream_t stream) { - - GemmUniversalConfiguration configuration{ - GemmUniversalMode::kGemm, - {M, N, K}, - 1, - lda, - ldb, - ldc, - ldd}; - - // host workspace size - uint64_t host_workspace_size_needed = operation->get_host_workspace_size(&configuration); - if (uint64_t(kHostWorkspaceSize) < host_workspace_size_needed) - throw std::runtime_error("Unable to find gemm operation"); - char host_workspace[kHostWorkspaceSize]; - - // device workspace size - uint64_t device_workspace_size_needed = operation->get_device_workspace_size(&configuration); - if (uint64_t(kDeviceWorkspaceSize) < device_workspace_size_needed) - throw std::runtime_error("Unable to find gemm operation"); - static void *device_workspace; - - // Initialize host and device workspaces - Status status = operation->initialize(&configuration, host_workspace, device_workspace, stream); - if (status != cutlass::Status::kSuccess) - throw std::runtime_error("Unable to initialize workspace"); - - // Run the operator - GemmArguments arguments{ptr_A, ptr_B, ptr_C, ptr_D, alpha, beta, scalar_mode}; - operation->run(&arguments, host_workspace, device_workspace, stream); -} - -const Operation *autotune(int M, int N, int K, - NumericTypeID element_compute, - NumericTypeID element_scalar, - void const *alpha, - NumericTypeID element_A, - LayoutTypeID layout_A, - ComplexTransform transform_A, - void const *ptr_A, - int lda, - NumericTypeID element_B, - LayoutTypeID layout_B, - ComplexTransform transform_B, - void const *ptr_B, - int ldb, - void const *beta, - NumericTypeID element_C, - void const *ptr_C, - int ldc, - void *ptr_D, - int ldd, - ScalarPointerMode scalar_mode, - int device_id, - cudaStream_t stream) { - - // index operation table with functional key - GemmFunctionalKey key( - Provider::kCUTLASS, - GemmKind::kUniversal, - element_compute, - element_scalar, - element_A, - layout_A, - transform_A, - element_B, - layout_B, - transform_B, - element_C); - - auto operators_it = Singleton::get().operation_table.gemm_operations.find(key); - if (operators_it == Singleton::get().operation_table.gemm_operations.end()) - throw std::runtime_error("Unable to find gemm operation"); - if (operators_it->second.empty()) - throw std::runtime_error("Unable to find gemm operation"); - - cudaDeviceProp device_prop; - cudaError_t error = cudaGetDeviceProperties(&device_prop, device_id); - if (error != cudaSuccess) - throw std::runtime_error("Unable to get device properties"); - int cc = device_prop.major * 10 + device_prop.minor; - - // index operation table with preference key - // assume 8-bytes aligned memory pointers - int alignment = 8; - GemmPreferenceKey preference_key(cc, alignment); - auto autotune_it = operators_it->second.find(preference_key); - if (autotune_it == operators_it->second.end()) - throw std::runtime_error("Unable to find gemm operation"); - const std::vector &operations = autotune_it->second; - if (operations.empty()) - throw std::runtime_error("Unable to find gemm operation"); - - // auto-tune - const Operation *best = nullptr; - double best_ms = std::numeric_limits::max(); - for (const Operation *op 
: operations) { - auto fn = [&]() { run(M, N, K, lda, ldb, ldc, ldd, ptr_A, ptr_B, ptr_C, ptr_D, - alpha, beta, scalar_mode, op, stream); }; - triton::driver::cu_stream tt_stream((CUstream)stream, false); - double ms = triton::tools::bench(fn, &tt_stream, 10, 25); - if (ms < best_ms) { - best_ms = ms; - best = op; - } - } - return best; -} - -// map of torch datatypes to cutlass datatypes -std::map type_map = { - {"float16", NumericTypeID::kF16}, - {"float32", NumericTypeID::kF32}, - {"float64", NumericTypeID::kF64}}; - -void cutlass_matmul(uintptr_t A, uintptr_t B, uintptr_t C, - size_t M, size_t N, size_t K, - size_t stride_a_0, size_t stride_a_1, - size_t stride_b_0, size_t stride_b_1, - size_t stride_c_0, size_t stride_c_1, - std::string type_a, std::string type_b, std::string type_c, - size_t dev_id, uint64_t stream_handle) { - void *ptr_A = (void *)A; - void *ptr_B = (void *)B; - void *ptr_C = (void *)C; - void *ptr_D = ptr_C; - size_t lda = stride_a_0; - size_t ldb = stride_b_0; - size_t ldc = stride_c_1; - size_t ldd = ldc; - float alpha = 1.0f; - float beta = 0.0f; - // layout for A - LayoutTypeID layout_A; - if (stride_a_0 == 1) - layout_A = LayoutTypeID::kColumnMajor; - else if (stride_a_1 == 1) - layout_A = LayoutTypeID::kRowMajor; - else - throw std::runtime_error("A layout is not supported"); - // layout for B - LayoutTypeID layout_B; - if (stride_b_0 == 1) - layout_B = LayoutTypeID::kColumnMajor; - else if (stride_b_1 == 1) - layout_B = LayoutTypeID::kRowMajor; - else - throw std::runtime_error("B layout is not supported"); - // data types - NumericTypeID element_compute = NumericTypeID::kF32; - NumericTypeID element_A = type_map[type_a]; - NumericTypeID element_B = type_map[type_b]; - NumericTypeID element_C = type_map[type_c]; - // misc. flags - ScalarPointerMode scalar_mode = ScalarPointerMode::kHost; - NumericTypeID element_scalar = NumericTypeID::kF32; - ComplexTransform transform_A = ComplexTransform::kNone; - ComplexTransform transform_B = ComplexTransform::kNone; - // runtime flags - cudaStream_t stream = (cudaStream_t)stream_handle; - // auto-tune - std::vector tune_key = {M, N, K, (size_t)element_A, (size_t)element_B, (size_t)element_C, - dev_id, (size_t)element_compute, (size_t)scalar_mode}; - auto it = op_cache_.find(tune_key); - if (it == op_cache_.end()) { - const Operation *op = autotune(M, N, K, element_compute, element_scalar, &alpha, - element_A, layout_A, transform_A, ptr_A, lda, - element_B, layout_B, transform_B, ptr_B, ldb, - &beta, element_C, ptr_C, ldc, ptr_D, ldd, scalar_mode, - dev_id, stream); - it = op_cache_.insert({tune_key, op}).first; - } - run(M, N, K, lda, ldb, ldc, ldd, ptr_A, ptr_B, ptr_C, ptr_D, &alpha, &beta, - scalar_mode, it->second, stream); -} - -void init_cutlass(pybind11::module &m) { - pybind11::module subm = m.def_submodule("cutlass"); - subm.def("matmul", &cutlass_matmul, "matrix multiplication"); -} \ No newline at end of file diff --git a/python/src/functions.h b/python/src/functions.h deleted file mode 100644 index 40142ebddc70..000000000000 --- a/python/src/functions.h +++ /dev/null @@ -1,696 +0,0 @@ -#include "triton/ir/builder.h" -#include -#include -#include - -namespace ir = triton::ir; -namespace py = pybind11; - -static const std::string _builder_doc = R"pbdoc( - :param builder: IR builder to generate code into, optional, set automatically when called inside a @triton.jit function - :type builder: triton.ir.builder -)pbdoc"; - -#define VA_ARGS(...) , ##__VA_ARGS__ -#define DEF_FUNC(MOD, PY_NAME, C_FUNC, ...) 
\ - MOD.def(PY_NAME, C_FUNC, (C_FUNC##_docstr + _builder_doc).c_str(), \ - ret::reference VA_ARGS(__VA_ARGS__), "builder"_a) - -void throw_not_implemented(std::string key) { - throw std::runtime_error("Encountered unimplemented code path in `" + key + "`. This is likely a bug on our side."); -} - -void throw_not_int_or_float(std::string key) { - throw std::runtime_error("`" + key + "` only supported for integer and floating point types."); -} - -enum type_code { - _bool, - int8, - int16, - int32, - int64, - float16, - float32, - float64 -}; - -ir::type *make_ir(type_code ty, ir::builder *builder) { - switch (ty) { - case float16: - return builder->get_half_ty(); - case float32: - return builder->get_float_ty(); - default: - throw_not_implemented("make_ir"); - } -} - -type_code from_ir(ir::type *ty) { - if (ty->is_half_ty()) - return float16; - if (ty->is_float_ty()) - return float32; - throw_not_implemented("from_ir"); -} - -/*---------------------------------------------- - definition of triton.cast / triton.ir.value.to - ----------------------------------------------*/ -std::string cast_docstr = R"pbdoc( - Tries to cast a block to a new data type. - - :param input: The input block. - :type input: triton.ir.value -)pbdoc"; - -ir::value *cast(ir::value *input, type_code _dtype, ir::builder *builder) { - ir::type *src_ty = input->get_type(); - ir::type *dst_ty = make_ir(_dtype, builder); - if (src_ty->is_block_ty()) - dst_ty = ir::block_type::get(dst_ty, input->get_type()->get_block_shapes()); - ir::type *src_sca_ty = src_ty->get_scalar_ty(); - ir::type *dst_sca_ty = dst_ty->get_scalar_ty(); - // FP Truncation - bool truncate_fp = src_sca_ty->is_floating_point_ty() && - dst_sca_ty->is_floating_point_ty() && - src_sca_ty->get_fp_mantissa_width() > dst_sca_ty->get_fp_mantissa_width(); - if (truncate_fp) - return builder->create_fp_trunc(input, dst_ty); - // FP Extension - bool ext_fp = src_sca_ty->is_floating_point_ty() && - dst_sca_ty->is_floating_point_ty() && - src_sca_ty->get_fp_mantissa_width() < dst_sca_ty->get_fp_mantissa_width(); - if (ext_fp) - return builder->create_fp_ext(input, dst_ty); - // Int cast - if (src_sca_ty->is_integer_ty() && dst_sca_ty->is_integer_ty() && - src_sca_ty->get_integer_bitwidth() != dst_sca_ty->get_integer_bitwidth()) - return builder->create_int_cast(input, dst_ty, true); - // Float -> Int - if (src_sca_ty->is_floating_point_ty() && dst_sca_ty->is_integer_ty()) - return builder->create_fp_to_si(input, dst_ty); - // int -> Float - if (src_sca_ty->is_integer_ty() && dst_sca_ty->is_floating_point_ty()) - return builder->create_si_to_fp(input, dst_ty); - // Ptr -> Ptr - if (src_sca_ty->is_pointer_ty() && dst_sca_ty->is_pointer_ty()) - return builder->create_cast(ir::BitCast, input, dst_ty); - // * -> Bool - if (dst_sca_ty->is_bool_ty()) { - if (src_sca_ty->is_pointer_ty()) - input = cast(input, int64, builder); - ir::value *other = builder->get_int64(0); - if (src_ty->is_bool_ty()) - other = builder->create_splat(other, src_ty->get_block_shapes()); - return builder->create_icmpNE(input, other); - } - throw_not_implemented("cast from " + src_sca_ty->repr() + " to " + dst_sca_ty->repr()); -} - -/*---------------------------------------------- - definition of triton.broadcast_check - ----------------------------------------------*/ -std::string try_broadcast_docstr = R"pbdoc( - Tries to broadcast two blocks to a common compatible shape. - - :param input: The first input block. - :type input: triton.ir.value - :param other: The second input block. 
- :type other: triton.ir.value -)pbdoc"; - -std::tuple try_broadcast(ir::value *lhs, ir::value *rhs, ir::builder *builder) { - ir::type *lhs_ty = lhs->get_type(); - ir::type *rhs_ty = rhs->get_type(); - // make_shape_compatible(block, scalar) - if (lhs_ty->is_block_ty() && !rhs_ty->is_block_ty()) - rhs = builder->create_splat(rhs, lhs_ty->get_block_shapes()); - // make_shape_compatible(scalar, block) - else if (!lhs_ty->is_block_ty() && rhs_ty->is_block_ty()) - lhs = builder->create_splat(lhs, rhs_ty->get_block_shapes()); - // make_shape_compatible(block, block) - else if (lhs_ty->is_block_ty() && rhs_ty->is_block_ty()) { - auto lhs_shape = lhs_ty->get_block_shapes(); - auto rhs_shape = rhs_ty->get_block_shapes(); - if (lhs_shape.size() != rhs_shape.size()) - throw std::runtime_error("Cannot make_shape_compatible: blocks must have the same rank"); - ir::type::block_shapes_t ret_shape; - for (size_t i = 0; i < lhs_shape.size(); ++i) { - unsigned left = lhs_shape[i]; - unsigned right = rhs_shape[i]; - if (left == 1) - ret_shape.push_back(right); - else if (right == 1) - ret_shape.push_back(left); - else if (left == right) - ret_shape.push_back(left); - else - throw std::runtime_error("Cannot make_shape_compatible: incompatible dimensions at index " + std::to_string(i) + - ": " + std::to_string(left) + " and " + std::to_string(right)); - } - if (lhs_shape != ret_shape) - lhs = builder->create_broadcast(lhs, ret_shape); - if (rhs_shape != ret_shape) - rhs = builder->create_broadcast(rhs, ret_shape); - } - return std::make_tuple(lhs, rhs); -} - -/*---------------------------------------------- - definition of triton.broadcast_to - ----------------------------------------------*/ -std::string broadcast_to_docstr = R"pbdoc( - Tries to broadcast a block to a new shape. - - :param input: The input block. - :type input: triton.value - :param shape: The new shape. - :type shape: tuple of int -)pbdoc"; - -ir::value *broadcast_to(ir::value *input, const ir::type::block_shapes_t &shape, ir::builder *builder) { - if (!input->get_type()->is_block_ty()) - return builder->create_splat(input, shape); - auto src_shape = input->get_type()->get_block_shapes(); - if (src_shape.size() != shape.size()) - throw std::runtime_error("Cannot broadcast"); - return builder->create_broadcast(input, shape); -} - -/*---------------------------------------------- - definition of triton.load - ----------------------------------------------*/ -std::string load_docstr = R"pbdoc( - Return a block of data whose values are, elementwise, loaded from memory at location defined by `pointer`. - - :param pointer: Pointer to the data to be loaded. - :type pointer: Block of triton.pointer - :param mask: if mask[idx] is false, do not load the data at `pointer[idx]`. - :type mask: Block of triton.bool, optional - :param other: if mask[idx] is false, return other[idx] instead of 'pointer[idx]` - :type other: Block of triton.value, optional - )pbdoc"; - -ir::value *load(ir::value *pointer, std::optional _mask, std::optional _other, ir::builder *builder) { - if (!_mask.has_value() && !_other.has_value()) - return builder->create_load(pointer); - if (!_mask.has_value()) - throw std::runtime_error("`other` cannot be provided without `mask`"); - ir::value *mask = _mask.value(); - ir::type *elt_ty = pointer->get_type()->get_scalar_ty()->get_pointer_element_ty(); - auto shape = pointer->get_type()->get_block_shapes(); - ir::value *other = _other.has_value() ? 
_other.value() : ir::undef_value::get(elt_ty); - other = cast(other, from_ir(elt_ty), builder); - other = broadcast_to(other, shape, builder); - mask = broadcast_to(mask, shape, builder); - return builder->create_masked_load(pointer, mask, other); -} - -/*---------------------------------------------- - definition of triton.store - ----------------------------------------------*/ -std::string store_docstr = R"pbdoc( - Stores `value` block of elements in memory, element-wise, at the memory locations specified by `pointer`. - - :param pointer: The memory locations where the elements of `value` are stored. - :type pointer: Block of triton.pointer - :param value: The block of elements to be stored. - :type value: Block of triton.value - :param mask: If mask[idx] is false, do not store `value[idx]` at `pointer[idx]`. - :type mask: Block of triton.bool, optional - )pbdoc"; -ir::value *store(ir::value *ptr, ir::value *val, std::optional _mask, ir::builder *builder) { - if (!_mask.has_value()) - return builder->create_store(ptr, val); - ir::value *mask = _mask.value(); - return builder->create_masked_store(ptr, val, mask); -} - -/*---------------------------------------------- - definition of triton.dot - ----------------------------------------------*/ -std::string dot_docstr = R"pbdoc( - Returns the matrix product of two blocks. - The two blocks must be two dimensions and have compatible inner dimensions. - - :param input: The first block to be multiplied. - :type input: 2D block of scalar-type in {`float16`, `float32`} - :param other: The second block to be multiplied. - :type other: 2D block of scalar-type in {`float16`, `float32`} - )pbdoc"; -ir::value *dot(ir::value *lhs, ir::value *rhs, ir::builder *builder) { - ir::value *_0 = builder->get_float32(0); - unsigned M = lhs->get_type()->get_block_shapes()[0]; - unsigned N = rhs->get_type()->get_block_shapes()[1]; - _0 = builder->create_splat(_0, {M, N}); - return builder->create_dot(lhs, rhs, _0); -} - -/*---------------------------------------------- - definition of triton.where - ----------------------------------------------*/ -std::string where_docstr = R"pbdoc( - Returns a block of elements from either `x` or `y`, depending on `condition`. - Note that `x` and `y` are always evaluated regardless of the value of `condition`. - If you want to avoid unintended memory operations, use the `mask` arguments in `triton.load` and `triton.store` instead. - - :param condition: When True (nonzero), yield x, otherwise yield y. - :type condition: Block of triton.bool - :param x: values selected at indices where condition is True. - :param y: values selected at indices where condition is False. - )pbdoc"; -ir::value *where(ir::value *condition, ir::value *x, ir::value *y, ir::builder *builder) { - return builder->create_select(condition, x, y); -}; - -/*---------------------------------------------- - definition of triton.arange - ----------------------------------------------*/ -std::string arange_docstr = R"pbdoc( - Returns contiguous values within the open interval [start, end). - - :param start: Start of the interval. - :type start: int - :param stop: End of the interval. 
- :type stop: int - )pbdoc"; -ir::value *arange(int start, int end, ir::builder *builder) { - return builder->get_range(start, end); -}; - -/*---------------------------------------------- - definition of triton.program_id - ----------------------------------------------*/ -std::string program_id_docstr = R"pbdoc( - Returns the id of the current program instance along the given `axis`. - Triton uses an SPMD model in which different @triton.jit functions run in parallel with different `program_id`s. - - :param axis: The axis of the 3D launch grid. Has to be either 0, 1 or 2. - :type axis: int - )pbdoc"; -ir::value *program_id(int axis, ir::builder *builder) { - return builder->create_get_program_id(axis); -}; - -/*---------------------------------------------- - definition of triton.num_programs - ----------------------------------------------*/ -std::string num_programs_docstr = R"pbdoc( - Returns the number of program instances launched along the given `axis`. - - :param axis: The axis of the 3D launch grid. Has to be either 0, 1 or 2. - :type axis: int - )pbdoc"; -ir::value *num_programs(int axis, ir::builder *builder) { - return builder->create_get_num_programs(axis); -}; - -/*---------------------------------------------- - definition of triton.zeros - ----------------------------------------------*/ -std::string zeros_docstr = R"pbdoc( - Returns a block filled with the scalar value 0 and the given shape. - - :param shape: Shape of the new array, e.g., (8, 16) or (8, ) - :type shape: tuple of ints - :param dtype: Data-type of the new array, e.g., tl.float16 - :type dtype: triton.ir.dtype - )pbdoc"; -ir::value *zeros(ir::type::block_shapes_t shape, type_code _dtype, ir::builder *builder) { - ir::type *dtype = make_ir(_dtype, builder); - ir::value *_0 = ir::constant::get_null_value(dtype); - return builder->create_splat(_0, shape); -}; - -/*---------------------------------------------- - definition of triton.exp - ----------------------------------------------*/ -std::string _exp_docstr = R"pbdoc( - Returns the element-wise exponential of `input`. - )pbdoc"; -ir::value *_exp(ir::value *input, ir::builder *builder) { - return builder->create_exp(input); -}; - -/*---------------------------------------------- - definition of triton.log - ----------------------------------------------*/ -std::string _log_docstr = R"pbdoc( - Returns the element-wise natural logarithm of `input`. - )pbdoc"; -ir::value *_log(ir::value *input, ir::builder *builder) { - return builder->create_log(input); -}; - -/*---------------------------------------------- - definition of triton.sqrt - ----------------------------------------------*/ -std::string sqrt_docstr = R"pbdoc( - Returns the element-wise square root of `input`. - )pbdoc"; -ir::value *sqrt(ir::value *input, ir::builder *builder) { - return builder->create_sqrt(input); -}; - -ir::value *reduce_impl(ir::value *input, unsigned int axis, ir::builder *builder, const std::string &name, - ir::reduce_inst::op_t FLOAT_OP, ir::reduce_inst::op_t INT_OP) { - ir::type *scalar_ty = input->get_type()->get_scalar_ty(); - if (scalar_ty->is_floating_point_ty()) - return builder->create_reduce(input, FLOAT_OP, axis); - else if (scalar_ty->is_integer_ty()) - return builder->create_reduce(input, INT_OP, axis); - else - throw_not_int_or_float(name); -} - -/*---------------------------------------------- - definition of triton.min - ----------------------------------------------*/ -std::string min_docstr = R"pbdoc( - Returns the minimum value of `input`. 
- )pbdoc"; -ir::value *min(ir::value *input, unsigned int axis, ir::builder *builder) { - return reduce_impl(input, axis, builder, "min", ir::reduce_inst::FMIN, ir::reduce_inst::MIN); -}; - -/*---------------------------------------------- - definition of triton.arg_min - ----------------------------------------------*/ -std::string min_docstr = R"pbdoc( - Returns the minimum value's index of `input`. - )pbdoc"; -ir::value *argmin(ir::value *input, unsigned int axis, ir::builder *builder) { - return reduce_impl(input, axis, builder, "argmin", ir::reduce_inst::ARGFMIN, ir::reduce_inst::ARGMIN); -}; - -/*---------------------------------------------- - definition of triton.max - ----------------------------------------------*/ -std::string max_docstr = R"pbdoc( - Returns the maximum value of `input`. - )pbdoc"; -ir::value *max(ir::value *input, unsigned int axis, ir::builder *builder) { - return reduce_impl(input, axis, builder, "max", ir::reduce_inst::FMAX, ir::reduce_inst::MAX); -}; - -/*---------------------------------------------- - definition of triton.arg_max - ----------------------------------------------*/ -std::string max_docstr = R"pbdoc( - Returns the maximum value's index of `input`. - )pbdoc"; -ir::value *argmax(ir::value *input, unsigned int axis, ir::builder *builder) { - return reduce_impl(input, axis, builder, "argmax", ir::reduce_inst::ARGFMAX, ir::reduce_inst::ARGMAX); -}; - -/*---------------------------------------------- - definition of triton.sum - ----------------------------------------------*/ -std::string sum_docstr = R"pbdoc( - Returns the sum of `input`. - )pbdoc"; -ir::value *sum(ir::value *input, unsigned int axis, ir::builder *builder) { - return reduce_impl(input, axis, builder, "sum", ir::reduce_inst::FADD, ir::reduce_inst::ADD); -}; - -/*---------------------------------------------- - definition of triton.atomic_cas - ----------------------------------------------*/ -std::string atomic_cas_docstr = R"pbdoc( - Atomic compare-and-swap. - )pbdoc"; -ir::value *atomic_cas(ir::value *ptr, ir::value *cmp, ir::value *val, ir::builder *builder) { - return builder->create_atomic_cas(ptr, cmp, val); -}; - -/*---------------------------------------------- - definition of triton.atomic_xchg - ----------------------------------------------*/ -std::string atomic_xchg_docstr = R"pbdoc( - Atomic exchange. - )pbdoc"; -ir::value *atomic_xchg(ir::value *ptr, ir::value *val, ir::builder *builder) { - return builder->create_atomic_exch(ptr, val); -}; - -/*---------------------------------------------- - debug barrier - ----------------------------------------------*/ -std::string debug_barrier_docstr = R"pbdoc( - Temporary hacky fixup for when the compiler forgets to insert sync barriers -)pbdoc"; -ir::value *debug_barrier(ir::builder *builder) { - return builder->create_barrier(); -} - -#define DEF_BINARY_OP(MOD, PY_NAME, C_FUNC, ...) \ - MOD.def(PY_NAME, binary_op(C_FUNC), (C_FUNC##_docstr + _builder_doc).c_str(), \ - ret::reference VA_ARGS(__VA_ARGS__), "builder"_a) - -template -std::function -binary_op(const FN &fn) { - auto ret = [&fn](ir::value *self, ir::value *other, ir::builder *builder) { - //std::tie(self, other) = try_broadcast(self, other, builder); - return fn(self, other, builder); - }; - return ret; -} - -/*---------------------------------------------- - definition of self + other - ----------------------------------------------*/ -std::string add_docstr = R"pbdoc( - Returns self + other, element-wise. 
-)pbdoc"; -ir::value *add(ir::value *self, ir::value *other, ir::builder *builder) { - ir::type *scalar_ty = self->get_type()->get_scalar_ty(); - // ptr + offset - if (scalar_ty->is_pointer_ty()) - return builder->create_gep(self, {other}); - // float + float - else if (scalar_ty->is_floating_point_ty()) - return builder->create_fadd(self, other); - // int + int - else if (scalar_ty->is_integer_ty()) - return builder->create_add(self, other); - throw_not_implemented("add"); -} - -/*---------------------------------------------- - definition of self - other - ----------------------------------------------*/ -std::string sub_docstr = R"pbdoc( - Returns self - other, element-wise. -)pbdoc"; -ir::value *sub(ir::value *self, ir::value *other, ir::builder *builder) { - ir::type *scalar_ty = self->get_type()->get_scalar_ty(); - // ptr + offset - if (scalar_ty->is_pointer_ty()) - return builder->create_gep(self, {other}); - // float + float - if (scalar_ty->is_floating_point_ty()) - return builder->create_fsub(self, other); - // int + int - else if (scalar_ty->is_integer_ty()) - return builder->create_sub(self, other); - throw_not_implemented("sub"); -} - -/*---------------------------------------------- - definition of self * other - ----------------------------------------------*/ -std::string mul_docstr = R"pbdoc( - Returns self * other, element-wise. -)pbdoc"; -ir::value *mul(ir::value *self, ir::value *other, ir::builder *builder) { - ir::type *scalar_ty = self->get_type()->get_scalar_ty(); - // float * float - if (scalar_ty->is_floating_point_ty()) - return builder->create_fmul(self, other); - // int * int - else if (scalar_ty->is_integer_ty()) - return builder->create_mul(self, other); - throw_not_implemented("mul"); -} - -/*---------------------------------------------- - definition of self > other - ----------------------------------------------*/ -std::string greater_than_docstr = R"pbdoc( - Returns self > other, element-wise. -)pbdoc"; -ir::value *greater_than(ir::value *self, ir::value *other, ir::builder *builder) { - ir::type *scalar_ty = self->get_type()->get_scalar_ty(); - // float > float - if (scalar_ty->is_floating_point_ty()) - return builder->create_fcmpOGT(self, other); - // int > int - else if (scalar_ty->is_integer_ty()) - return builder->create_icmpSGT(self, other); - throw_not_implemented("greater_than"); -} - -/*---------------------------------------------- - definition of self >= other - ----------------------------------------------*/ -std::string greater_equal_docstr = R"pbdoc( - Returns self >= other, element-wise. -)pbdoc"; -ir::value *greater_equal(ir::value *self, ir::value *other, ir::builder *builder) { - ir::type *scalar_ty = self->get_type()->get_scalar_ty(); - // float >= float - if (scalar_ty->is_floating_point_ty()) - return builder->create_fcmpOGE(self, other); - // int >= int - else if (scalar_ty->is_integer_ty()) - return builder->create_icmpSGE(self, other); - throw_not_implemented("greater_equal"); -} - -/*---------------------------------------------- - definition of self < other - ----------------------------------------------*/ -std::string less_than_docstr = R"pbdoc( - Returns self < other, element-wise. 
-)pbdoc"; -ir::value *less_than(ir::value *self, ir::value *other, ir::builder *builder) { - ir::type *scalar_ty = self->get_type()->get_scalar_ty(); - // float < float - if (scalar_ty->is_floating_point_ty()) - return builder->create_fcmpOLT(self, other); - // int < int - else if (scalar_ty->is_integer_ty()) - return builder->create_icmpSLT(self, other); - throw_not_implemented("less_than"); -} - -/*---------------------------------------------- - definition of self <= other - ----------------------------------------------*/ -std::string less_equal_docstr = R"pbdoc( - Returns self <= other, element-wise. -)pbdoc"; -ir::value *less_equal(ir::value *self, ir::value *other, ir::builder *builder) { - ir::type *scalar_ty = self->get_type()->get_scalar_ty(); - // float < float - if (scalar_ty->is_floating_point_ty()) - return builder->create_fcmpOLE(self, other); - // int < int - else if (scalar_ty->is_integer_ty()) - return builder->create_icmpSLE(self, other); - throw_not_implemented("less_equal"); -} - -/*---------------------------------------------- - definition of self == other - ----------------------------------------------*/ -std::string equal_docstr = R"pbdoc( - Returns self == other, element-wise. -)pbdoc"; -ir::value *equal(ir::value *self, ir::value *other, ir::builder *builder) { - ir::type *scalar_ty = self->get_type()->get_scalar_ty(); - // float == float - if (scalar_ty->is_floating_point_ty()) - return builder->create_fcmpOEQ(self, other); - // int == int - else if (scalar_ty->is_integer_ty()) - return builder->create_icmpEQ(self, other); - throw_not_implemented("equal"); -} - -/*---------------------------------------------- - definition of self / other - ----------------------------------------------*/ -std::string _div_docstr = R"pbdoc( - Returns self / other, element-wise. -)pbdoc"; -ir::value *_div(ir::value *self, ir::value *other, ir::builder *builder) { - ir::type *scalar_ty = self->get_type()->get_scalar_ty(); - // float / float - if (scalar_ty->is_floating_point_ty()) - return builder->create_fdiv(self, other); - // int / int - else if (scalar_ty->is_integer_ty()) - return builder->create_sdiv(self, other); - throw_not_implemented("div"); -} - -/*---------------------------------------------- - definition of self % other - ----------------------------------------------*/ -std::string mod_docstr = R"pbdoc( - Returns self % other, element-wise. -)pbdoc"; -ir::value *mod(ir::value *self, ir::value *other, ir::builder *builder) { - ir::type *scalar_ty = self->get_type()->get_scalar_ty(); - // float % int - if (scalar_ty->is_floating_point_ty()) - return builder->create_frem(self, other); - // int % int - else if (scalar_ty->is_integer_ty()) - return builder->create_srem(self, other); - throw_not_implemented("mod"); -} - -/*---------------------------------------------- - definition of self & other - ----------------------------------------------*/ -std::string _and_docstr = R"pbdoc( - Returns self & other, element-wise. 
-)pbdoc"; -ir::value *_and(ir::value *self, ir::value *other, ir::builder *builder) { - return builder->create_and(self, other); -} - -/*---------------------------------------------- - definition of minimum(self, other) - ----------------------------------------------*/ -std::string minimum_docstr = R"pbdoc( - Returns element-wise minimum of self and other -)pbdoc"; -ir::value *minimum(ir::value *self, ir::value *other, ir::builder *builder) { - return where(less_than(self, other, builder), self, other, builder); -} - -/*---------------------------------------------- - definition of self[slices] - ----------------------------------------------*/ - -enum slice_mode_t { - NEWAXIS, - ALL -}; - -std::string subscript_docstr = R"pbdoc( - returns self[slices]. - - :param slices: The slices to subscript with. - :type slices: List of `None` or `:` slices. -)pbdoc"; -ir::value *subscript(ir::value *self, std::vector slices, ir::builder *builder) { - std::vector modes; - for (py::object slice : slices) { - py::object none = py::none(); - py::object all = py::make_tuple(none, none, none); - if (slice.is(none)) - modes.push_back(NEWAXIS); - else if (all.attr("__eq__")(slice)) - modes.push_back(ALL); - else - throw std::runtime_error("slice must be None or (None, None, None)"); - } - - ir::type::block_shapes_t shape; - size_t curr = 0; - for (slice_mode_t mode : modes) { - if (mode == NEWAXIS) - shape.push_back(1); - else { - assert(mode == ALL); - shape.push_back(self->get_type()->get_block_shapes()[curr++]); - } - } - return builder->create_reshape(self, shape); -} diff --git a/python/src/main.cc b/python/src/main.cc index 48fc69e0de1e..d09679727030 100644 --- a/python/src/main.cc +++ b/python/src/main.cc @@ -8,8 +8,4 @@ void init_cutlass(pybind11::module &m); PYBIND11_MODULE(libtriton, m) { m.doc() = "Python bindings to the C++ Triton API"; init_triton(m); - init_superblocking(m); -#ifdef WITH_CUTLASS_BINDINGS - init_cutlass(m); -#endif } diff --git a/python/src/superblock.cc b/python/src/superblock.cc deleted file mode 100644 index 35b7e9de4232..000000000000 --- a/python/src/superblock.cc +++ /dev/null @@ -1,119 +0,0 @@ -#include -#include -#include -#include -#include -#include -#include -#ifdef _OPENMP -#include -#endif - -// row-major 3d tensor -class tensor_3d { -public: - tensor_3d(int size_0, int size_1, int size_2, int *data = nullptr) : data_(size_0 * size_1 * size_2, 0) { - if (data) - std::copy(data, data + data_.size(), data_.begin()); - stride_0_ = size_1 * size_2; - stride_1_ = size_2; - stride_2_ = 1; - } - - int &operator()(int i, int j, int k) { - return data_[i * stride_0_ + j * stride_1_ + k]; - } - -private: - std::vector data_; - int stride_0_; - int stride_1_; - int stride_2_; -}; - -std::vector segment_blocks(tensor_3d &layout, tensor_3d &idx, int max_width, int H, int M, int N) { - tensor_3d tmp(H, M, N); - std::vector current(H, 0); - int num = 0; - std::vector lut(H * M * N * 4); - for (size_t h = 0; h < H; h++) { - // surrounding indices - std::vector ii_left(max_width, -1); - std::vector> ii_top(max_width, std::vector(N, -1)); - // start the dynamic programming algorithm - for (size_t m = 0; m < M; m++) { - for (size_t n = 0; n < N; n++) { - int v = layout(h, m, n); - if (v == 0) - continue; - int n_left = ii_left[max_width - 1]; - int m_top = ii_top[max_width - 1][n]; - int top = (m_top >= 0) ? tmp(h, m_top, n) : 0; - int left = (n_left >= 0) ? tmp(h, m, n_left) : 0; - int topleft = (m_top >= 0 && n_left >= 0) ? 
tmp(h, m_top, n_left) : 0; - int width = std::min(left, std::min(top, topleft)) + 1; - // reset width if blocks cannot be - // packed together (i.e., there's a 1 "in the middle") - for (int nn = n_left + 1; nn < n; nn++) - if (ii_top[max_width - 1][nn] > ii_top[max_width - 1][n]) - width = 1; - tmp(h, m, n) = width; - // update n_left ring buffer - for (int k = 0; k < max_width - 1; k++) - ii_left[k] = ii_left[k + 1]; - ii_left[max_width - 1] = n; - // update ii_top ring buffer - for (int k = 0; k < max_width - 1; k++) - ii_top[k][n] = ii_top[k + 1][n]; - ii_top[max_width - 1][n] = m; - // block is too small -- skip - if (width != max_width) - continue; - // retained blocks are set to zeros - for (size_t km = 0; km < max_width; km++) - for (size_t kn = 0; kn < max_width; kn++) { - int mm = ii_top[km][n]; - int nn = ii_left[kn]; - if (mm < 0 || nn < 0) - continue; - layout(h, mm, nn) = 0; - tmp(h, mm, nn) = 0; - lut[num++] = (int)h; - lut[num++] = (int)mm; - lut[num++] = (int)nn; - lut[num++] = idx(h, mm, nn); - } - } - } - } - lut.resize(num); - return lut; -} - -typedef std::pair> lut_t; - -std::vector superblock(uintptr_t LAYOUT, int H, int M, int N, int start_width) { - std::vector ret; - int current = 0; - tensor_3d layout(H, M, N, (int *)LAYOUT); - tensor_3d idx(H, M, N); - for (int64_t h = 0; h < H; h++) - for (int64_t m = 0; m < M; m++) - for (int64_t n = 0; n < N; n++) { - if (layout(h, m, n) == 0) - continue; - idx(h, m, n) = current++; - } - // create lut - for (int max_width = start_width; max_width > 0; max_width /= 2) { - auto lut = segment_blocks(layout, idx, max_width, H, M, N); - if (lut.size() == 0) - continue; - ret.push_back(std::make_pair(max_width, pybind11::array_t(lut.size(), lut.data()))); - } - return ret; -} - -void init_superblocking(pybind11::module &m) { - m.def("superblock", &superblock, "super-blocking for block-sparse matrix multiplication"); -} \ No newline at end of file diff --git a/python/src/triton.cc b/python/src/triton.cc index cb3fdbb6ea79..31f754add333 100644 --- a/python/src/triton.cc +++ b/python/src/triton.cc @@ -1,49 +1,52 @@ -#include "triton/codegen/pass.h" -#include "triton/codegen/target.h" -#include "triton/codegen/extern_lib.h" -#include "triton/driver/error.h" -#include "triton/driver/llvm.h" -#include "triton/ir/builder.h" -#include "triton/ir/enums.h" -#include "triton/ir/function.h" -#include "triton/ir/module.h" -#include "triton/ir/print.h" +#include "mlir/IR/Builders.h" +#include "mlir/IR/BuiltinOps.h" +#include "mlir/IR/MLIRContext.h" +#include "mlir/IR/Verifier.h" + +#include "mlir/Conversion/Passes.h" +#include "mlir/Pass/Pass.h" +#include "mlir/Pass/PassManager.h" +#include "mlir/Transforms/Passes.h" + +#include "mlir/Parser.h" +#include "mlir/Support/FileUtilities.h" + +#include "mlir/Dialect/LLVMIR/LLVMDialect.h" +#include "triton/Analysis/Allocation.h" +#include "triton/Conversion/TritonGPUToLLVM/TritonGPUToLLVMPass.h" +#include "triton/Conversion/TritonToTritonGPU/TritonToTritonGPUPass.h" +#include "triton/Dialect/Triton/IR/Dialect.h" +#include "triton/Dialect/Triton/IR/Types.h" +#include "triton/Dialect/Triton/Transforms/Passes.h" +#include "triton/Dialect/TritonGPU/Transforms/Passes.h" +#include "triton/Target/LLVMIR/LLVMIRTranslation.h" +#include "triton/Target/PTX/PTXTranslation.h" +#include "triton/Tools/Sys/GetEnv.hpp" + +#include "llvm/IR/LegacyPassManager.h" +#include "llvm/IR/Module.h" +#include "llvm/IR/Verifier.h" +#include "llvm/IRReader/IRReader.h" +#include "llvm/Support/FileUtilities.h" +#include 
"llvm/Support/raw_ostream.h" + +#include "llvm/Support/SourceMgr.h" + +#include +#include +#include #include #include #include #include -#include #include -#include "Python.h" +#include #include #include #include #include -#include "llvm/IR/Module.h" -#include "llvm/IR/Verifier.h" namespace py = pybind11; -namespace ir = triton::ir; -namespace drv = triton::driver; - - -/*****************************************************************************/ -/* Python bindings for triton::driver */ -/*****************************************************************************/ -// information query -template -int cuGetInfo(CUdevice device) { - int res; - drv::dispatch::cuDeviceGetAttribute(&res, attr, device); - return res; -} - -template -int hipGetInfo(hipDevice_t device) { - int res; - drv::dispatch::hipDeviceGetAttribute(&res, attr, device); - return res; -} enum backend_t { HOST, @@ -51,492 +54,15 @@ enum backend_t { ROCM, }; -void cu_enable_peer_access(uint64_t peer_ptr){ - CUcontext context; - drv::dispatch::cuPointerGetAttribute(&context, CU_POINTER_ATTRIBUTE_CONTEXT, peer_ptr); - try { - drv::dispatch::cuCtxEnablePeerAccess(context, 0); - } catch (drv::exception::cuda::peer_access_already_enabled) {} -} - -void host_enqueue(uint64_t stream, uint64_t kernel, - uint64_t grid_0, uint64_t grid_1, uint64_t grid_2, - uint64_t block_0, uint64_t block_1, uint64_t block_2, - void* args_ptr, size_t args_size, int64_t shared_mem){ - throw std::runtime_error("unsupported"); -// auto hst = kernel->module()->hst(); -// hst_->futures->reserve(hst_->futures->size() + grid[0]*grid[1]*grid[2]); -// char* params = new char[args_size]; -// std::memcpy((void*)params, (void*)args, args_size); -// for(size_t i = 0; i < grid[0]; i++) -// for(size_t j = 0; j < grid[1]; j++) -// for(size_t k = 0; k < grid[2]; k++) -// hst_->futures->emplace_back(hst_->pool->enqueue(hst->fn, (char**)params, int32_t(i), int32_t(j), int32_t(k))); -} - -void cu_enqueue(uint64_t stream, uint64_t kernel, - uint64_t grid_0, uint64_t grid_1, uint64_t grid_2, - uint64_t block_0, uint64_t block_1, uint64_t block_2, - void* args_ptr, size_t args_size, int64_t shared_mem){ - void *config[] = { - CU_LAUNCH_PARAM_BUFFER_POINTER, (void*)args_ptr, - CU_LAUNCH_PARAM_BUFFER_SIZE, &args_size, - CU_LAUNCH_PARAM_END - }; - drv::dispatch::cuLaunchKernel((CUfunction)kernel, grid_0, grid_1, grid_2, - block_0, block_1, block_2, - shared_mem, (CUstream)stream, nullptr, config); -} - -void hip_enqueue(uint64_t stream, uint64_t kernel, - uint64_t grid_0, uint64_t grid_1, uint64_t grid_2, - uint64_t block_0, uint64_t block_1, uint64_t block_2, - void* args_ptr, size_t args_size, int64_t shared_mem) { - void *config[] = { - HIP_LAUNCH_PARAM_BUFFER_POINTER, (void*)args_ptr, - HIP_LAUNCH_PARAM_BUFFER_SIZE, &args_size, - HIP_LAUNCH_PARAM_END - }; - drv::dispatch::hipModuleLaunchKernel((hipFunction_t)kernel, grid_0, grid_1, grid_2, - block_0, block_1, block_2, - shared_mem, (hipStream_t)stream, nullptr, config); - -} - -long pow2_divisor(long N){ - if(N % 16 == 0) return 16; - if(N % 8 == 0) return 8; - if(N % 4 == 0) return 4; - if(N % 2 == 0) return 2; - return 1; -} - -// Returns something like "int16", whether dtype is a torch.dtype or -// triton.language.dtype. -std::string dtype_cache_key_part(const py::object& dtype) { - if (py::hasattr(dtype, "cache_key_part")) { - // Presumed to be a triton.language.dtype. - return std::string(py::str(py::getattr(dtype, "cache_key_part"))); - } else { - // Remove 'torch.' prefix from repr of torch.dtype. 
- py::object repr = py::repr(dtype); - size_t repr_len = PyUnicode_GET_LENGTH(repr.ptr()); - const char* repr_ptr = (const char*)PyUnicode_1BYTE_DATA(repr.ptr()); - if (repr_len <= 6 || strncmp(repr_ptr, "torch.", 6)) { - throw std::logic_error("invalid dtype: " + std::string(repr_ptr, repr_len)); - } - return std::string(repr_ptr + 6, repr_len - 6); - } -} - -size_t get_pointer_range_size(uint64_t addr){ - if(addr == 0) - return 0; - size_t size; - drv::dispatch::cuPointerGetAttribute(&size, CU_POINTER_ATTRIBUTE_RANGE_SIZE, (CUdeviceptr)addr); - return size; -} - -// Launch -void parse_args(py::list& args, py::list do_not_specialize, const std::string& func_key, py::list& arg_names, - std::string& cache_key, std::string& params, size_t& params_size, py::dict constants, - int num_warps, int num_stages, py::dict& extern_libs) { - size_t len = PyList_Size(args.ptr()); - params.reserve(8*len); // 8 max bytes by argument - char* params_ptr = ¶ms[0]; - cache_key = func_key; - cache_key += "-" + std::to_string(num_warps); - cache_key += "-" + std::to_string(num_stages); - cache_key += "-"; - for(int i = 0; i < len; i++){ - cache_key += "_"; - py::int_ py_i = py::int_(i); - bool specialize = !do_not_specialize.contains(py_i); - py::object arg = args[i]; - auto arg_ptr = arg.ptr(); - - // argument is `long` - if(PyLong_Check(arg_ptr)){ - int overflow; - long long value = PyLong_AsLongLongAndOverflow(arg_ptr, &overflow); - // values equal to 1 are specialized - if(specialize && (value == 1)){ - cache_key += "1"; - continue; - } - // int32, uint32, int64, and uint64 have different kernels - if (!overflow && -0x8000'0000LL <= value && value <= 0x7FFF'FFFFLL) { - cache_key += "int32"; - params_ptr = (char*)(((uintptr_t)params_ptr + 3) & (-4)); - std::memcpy(params_ptr, &value, 4); - params_ptr += 4; - } else if (!overflow && 0x8000'0000LL <= value && value <= 0xFFFF'FFFFLL) { - cache_key += "uint32"; - params_ptr = (char*)(((uintptr_t)params_ptr + 3) & (-4)); - std::memcpy(params_ptr, &value, 4); - params_ptr += 4; - } else if (!overflow) { - cache_key += "int64"; - params_ptr = (char*)(((uintptr_t)params_ptr + 7) & (-8)); - std::memcpy(params_ptr, &value, 8); - params_ptr += 8; - } else { - if (PyErr_Occurred()) { - throw std::logic_error("An error occurred?"); - } - unsigned long long unsigned_value = PyLong_AsUnsignedLongLong(arg_ptr); - if (PyErr_Occurred()) { - throw std::runtime_error("integer overflow in argument: " + std::string(py::str(arg))); - } - cache_key += "uint64"; - params_ptr = (char*)(((uintptr_t)params_ptr + 7) & (-8)); - std::memcpy(params_ptr, &unsigned_value, 8); - params_ptr += 8; - } - if(!specialize) - continue; - // values divisible by small powers of 2 are specialized - cache_key += "[multipleof("; - cache_key += std::to_string(pow2_divisor(value)); - cache_key += ")]"; - continue; - } - // argument is `float` - if(PyFloat_Check(arg_ptr)){ - cache_key += "float32"; - float value = PyFloat_AsDouble(arg_ptr); - params_ptr = (char*)(((uintptr_t)params_ptr + 3) & (-4)); - std::memcpy(params_ptr, &value, 4); - params_ptr += 4; - continue; - } - // argument is `bool` - if(PyBool_Check(arg_ptr)){ - cache_key += "bool"; - bool value = arg_ptr == Py_True ? 
true : false; - std::memcpy(params_ptr, &value, 1); - params_ptr += 1; - continue; - } - // argument is tensor - if(py::hasattr(arg, "data_ptr")){ - py::object data_ptr = arg.attr("data_ptr")(); - long value = data_ptr.cast(); - params_ptr = (char*)(((uintptr_t)params_ptr + 7) & (-8)); - // copy param - std::memcpy(params_ptr, &value, 8); - params_ptr += 8; - // update cache key - cache_key += dtype_cache_key_part(arg.attr("dtype")); - cache_key += "*"; - cache_key += "[multipleof("; - size_t range_size; - try { - range_size = get_pointer_range_size(value); - } catch (...) { - throw std::runtime_error("argument tensor #" + std::to_string(i) + " is not on cuda! " + std::string(py::str(arg))); - } - cache_key += std::to_string(std::min(pow2_divisor(value), pow2_divisor(range_size))); - cache_key += ")]"; - continue; - } - // argument is `constexpr` - if (py::hasattr(arg, "value")) { - py::object value = arg.attr("value"); - // check if value is a callable object using PyCallable_Check - if (PyCallable_Check(value.ptr())) { - throw std::runtime_error( - "constant argument cannot be a callable object: " + - std::string(py::str(arg))); - } - py::object name = arg_names[i]; - constants[name] = value; - py::object repr = py::repr(value); - const char* start = (const char*)PyUnicode_1BYTE_DATA(repr.ptr()); - size_t len = PyUnicode_GET_LENGTH(repr.ptr()); - cache_key += std::string(start, len); - continue; - } - std::string ty_str = arg.attr("__class__").attr("__name__").cast(); - if(ty_str == "NoneType"){ - cache_key += "None"; - continue; - } - std::string err_msg = "Received type '" + ty_str + "' for argument " + std::to_string(i) + "." - + " Only int, float, bool, torch.Tensor, and triton.language.constexpr are supported."; - throw std::runtime_error(err_msg); - } - params_size = (std::ptrdiff_t)(params_ptr - ¶ms[0]); - - for (auto item : extern_libs) { - cache_key += "-" + item.first.cast(); - cache_key += "_" + item.second.cast(); - } -} - -// - void init_triton_runtime(py::module &&m) { - - // m.def("current_stream", [](uint64_t device){ - // return (uint64_t)(c10::cuda::getCurrentCUDAStream(device).stream()); - // }); - // wrap backend_t py::enum_(m, "backend") - .value("HOST", HOST) - .value("CUDA", CUDA) - .value("ROCM", ROCM) - .export_values(); - - // enable peer-to-peer - m.def("enable_peer_access", [](backend_t backend, uint64_t peer_ptr) { - if (backend != CUDA) - throw std::runtime_error("P2P only supported on CUDA devices!"); - cu_enable_peer_access(peer_ptr); - } - ); - - // get range size for the given pointer - m.def("get_pointer_range_size", &get_pointer_range_size); - - - // cache key - m.def("launch", [](py::list args, py::list do_not_specialize, const std::string& func_key, py::list& arg_names, - py::object device, py::int_ stream, py::dict bin_cache, py::int_ num_warps, py::int_ num_stages, - py::dict extern_libs, py::function add_to_cache, py::object grid){ - // parse arguments to compute cache key, compile-time constants and packed kernel arguments - long _num_warps = PyLong_AsLong(num_warps.ptr()); - long _num_stages = PyLong_AsLong(num_stages.ptr()); - std::string cache_key; - std::string params; - size_t params_size; - py::dict constants; - parse_args(args, do_not_specialize, func_key, arg_names, cache_key, params, - params_size, constants, _num_warps, _num_stages, extern_libs); - - // get cached binary - py::str key(cache_key); - py::bool_ noop = false; - if(!bin_cache.contains(key)) { - noop = add_to_cache(key, args, device, num_warps, num_stages, extern_libs); - } - 
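// Descriptive note: when add_to_cache reports a no-op for this key, the
// launcher returns None below and skips the kernel launch entirely.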
if (noop) - return (py::object)py::none(); - py::object bin = bin_cache[key]; - - // get grid - py::sequence seq; - if(!PySequence_Check(grid.ptr())) - seq = grid(constants); - else - seq = grid; - int size = seq.size(); - int grid_0 = py::cast(seq[0]); - int grid_1 = size < 2 ? 1 : py::cast(seq[1]); - int grid_2 = size < 3 ? 1 : py::cast(seq[2]); - - // enqueue - uint64_t kernel = py::cast(bin.attr("kernel")); - uint64_t shared_mem = py::cast(bin.attr("shared_mem")); - - // actually launch - void *config[] = { - CU_LAUNCH_PARAM_BUFFER_POINTER, params.data(), - CU_LAUNCH_PARAM_BUFFER_SIZE, ¶ms_size, - CU_LAUNCH_PARAM_END - }; - uint64_t _stream = PyLong_AsLong(stream.ptr()); - if(grid_0*grid_1*grid_2 > 0) { - // release the gil in case the enqueue blocks - // cuda will block if too many ops are enqueued - py::gil_scoped_release allow_threads; - drv::dispatch::cuLaunchKernel((CUfunction)kernel, grid_0, grid_1, grid_2, - _num_warps*32, 1, 1, shared_mem, (CUstream)_stream, - nullptr, config); - } - return bin; - }); - - m.def("cc", [](backend_t backend, uint64_t device) -> int { - if (backend == CUDA) { - CUdevice dev = (CUdevice)device; - int major = cuGetInfo(dev); - int minor = cuGetInfo(dev); - return major*10 + minor; - } - return -1; - }); - - // query maximum shared memory - m.def("max_shared_memory", [](backend_t backend, uint64_t device) { - if (backend == HOST) - return 0; - if(backend == CUDA) - return cuGetInfo(device); - if(backend == ROCM) - return hipGetInfo(device); - return -1; - }); - - // query DRAM & L2 cache - m.def("memory_clock_rate", [](backend_t backend, uint64_t device) { - if (backend == CUDA) return cuGetInfo(device); - return -1; - }); - m.def("global_memory_bus_width", [](backend_t backend, uint64_t device) { - if (backend == CUDA) return cuGetInfo(device); - return -1; - }); - m.def("l2_cache_size", [](backend_t backend, uint64_t device) { - if (backend == CUDA) return cuGetInfo(device); - return -1; - }); - - // query clock rate (in kilohertz) - m.def("clock_rate", [](backend_t backend, uint64_t device) { - if (backend == CUDA) return cuGetInfo(device); - return -1; - }); - - m.def("num_sm", [](backend_t backend, uint64_t device) { - if (backend == CUDA) return cuGetInfo(device); - return -1; - }); - - // enqueue - m.def("enqueue", [](backend_t backend, uint64_t stream, uint64_t kernel, - uint64_t grid_0, uint64_t grid_1, uint64_t grid_2, - uint64_t block_0, uint64_t block_1, uint64_t block_2, - const std::string &args, int64_t shared_mem){ - void* args_ptr = (void*)args.data(); - size_t args_size = args.size(); - // release the gil in case the enqueue blocks - // cuda will block if too many ops are enqueued - py::gil_scoped_release allow_threads; - if(backend == HOST) - host_enqueue(stream, kernel, grid_0, grid_1, grid_2, block_0, block_1, block_2, args_ptr, args_size, shared_mem); - if(backend == CUDA) - cu_enqueue(stream, kernel, grid_0, grid_1, grid_2, block_0, block_1, block_2, args_ptr, args_size, shared_mem); - if(backend == ROCM) - hip_enqueue(stream, kernel, grid_0, grid_1, grid_2, block_0, block_1, block_2, args_ptr, args_size, shared_mem); - }); - - -} - -/*****************************************************************************/ -/* Python bindings for triton::codegen */ -/*****************************************************************************/ -typedef std::map asm_map_t; - -// --------------------------------------- -// Compile Triton-IR to assembly -// --------------------------------------- - -void init_triton_codegen(py::module &&m) { 
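// The legacy `compile_ttir` binding below drives the full lowering pipeline
// and hands every intermediate form back to Python:
//
//   Triton-IR ("ttir")  --add_passes_to_emit_bin-->  LLVM-IR ("llir")
//   LLVM-IR             --llir_to_ptx------------->  PTX      ("ptx")
//   PTX                 --ptx_to_cubin (ptxas)---->  cubin    ("cubin")
//
// It returns (kernel name, asm map keyed by the strings above, n_shared_bytes).
// Roughly, from Python (the exact module path is an assumption, not shown here):
//   name, asm, n_shared = compile_ttir(backend, ir_module, device,
//                                      num_warps, num_stages, extern_libs, cc)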
- m.def("compile_ttir", - [](backend_t backend, ir::module &ir, uint64_t device, int num_warps, int num_stages, py::dict& extern_libs, size_t cc) { - std::ostringstream ttir; - int n_shared_bytes; - std::string tmp; - std::string ptx; - std::string cubin; - std::string name; - { // Scope where the GIL is released - py::gil_scoped_release allow_threads; - name = ir.get_function_list()[0]->get_name(); - ir.print(ttir); - llvm::LLVMContext ctx; - // construct extern lib map - triton::codegen::ExternLibMap extern_lib_map; - for (auto item : extern_libs) { - auto name = item.first.cast(); - auto path = item.second.cast(); - extern_lib_map.emplace( - name, triton::codegen::create_extern_lib(name, path)); - } - // device properties - if (cc == 0) { - CUdevice dev = (CUdevice)device; - size_t major = cuGetInfo(dev); - size_t minor = cuGetInfo(dev); - cc = major*10 + minor; - } - int version; - std::string ptxas_path = drv::path_to_ptxas(version); - // Triton-IR -> NVPTX LLVM-IR - triton::codegen::nvidia_cu_target target(cc); - auto llvm = triton::codegen::add_passes_to_emit_bin( - ir, ctx, &target, num_warps, num_stages, n_shared_bytes, extern_lib_map); - llvm::raw_string_ostream llir(tmp); - llir << *llvm; - llir.flush(); - // LLVM-IR -> PTX - ptx = drv::llir_to_ptx(llvm.get(), cc, version); - // PTX -> Binary - cubin = drv::ptx_to_cubin(ptx, ptxas_path, cc); - } - asm_map_t asm_map; - asm_map["ttir"] = py::cast(ttir.str()); - asm_map["llir"] = py::cast(tmp); - asm_map["ptx"] = py::cast(ptx); - - if(!cubin.empty()){ - py::bytes bytes(cubin); - asm_map["cubin"] = bytes; - } - return std::make_tuple(name, asm_map, n_shared_bytes); - }, - py::return_value_policy::take_ownership); - - - // --------------------------------------- - // Load provided assembly code into driver - // --------------------------------------- - m.def("load_binary", [](const std::string& name, const std::string& data, size_t n_shared_bytes, uint64_t device){ - py::gil_scoped_release allow_threads; - // create driver handles - CUfunction fun; - CUmodule mod; - drv::dispatch::cuModuleLoadData(&mod, data.c_str()); - drv::dispatch::cuModuleGetFunction(&fun, mod, name.c_str()); - // get allocated registers and spilled registers from the function - int n_regs = 0; - int n_spills = 0; - drv::dispatch::cuFuncGetAttribute(&n_regs, CU_FUNC_ATTRIBUTE_NUM_REGS, fun); - drv::dispatch::cuFuncGetAttribute(&n_spills, CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES, fun); - n_spills /= 4; - // set dynamic shared memory if necessary - int shared_optin; - drv::dispatch::cuDeviceGetAttribute(&shared_optin, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN, device); - if(n_shared_bytes > 49152 && shared_optin > 49152){ - drv::dispatch::cuFuncSetCacheConfig(fun, CU_FUNC_CACHE_PREFER_SHARED); - int shared_total, shared_static; - drv::dispatch::cuDeviceGetAttribute(&shared_total, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR, device); - drv::dispatch::cuFuncGetAttribute(&shared_static, CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES, fun); - drv::dispatch::cuFuncSetAttribute(fun, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, shared_optin - shared_static); - } - return std::make_tuple((uint64_t)mod, (uint64_t)fun, (uint64_t)n_regs, (uint64_t)n_spills); - }, - py::return_value_policy::take_ownership - ); - - - struct InstanceDescriptor - { - std::unordered_set divisibleBy16; - std::unordered_set equalTo1; - }; - - py::class_(m, "instance_descriptor") - .def(py::init<>()) - .def(py::init, std::unordered_set>()) - .def_readonly("divisible_by_16", 
&InstanceDescriptor::divisibleBy16) - .def_readonly("equal_to_1", &InstanceDescriptor::equalTo1); + .value("HOST", HOST) + .value("CUDA", CUDA) + // .value("ROCM", ROCM) + .export_values(); } - /*****************************************************************************/ /* Python bindings for triton::ir */ /*****************************************************************************/ @@ -545,421 +71,1385 @@ void init_triton_ir(py::module &&m) { using ret = py::return_value_policy; using namespace pybind11::literals; - py::enum_(m, "CACHE_MODIFIER") - .value("NONE", ir::load_inst::NONE) - .value("CA", ir::load_inst::CA) - .value("CG", ir::load_inst::CG) + py::enum_(m, "CACHE_MODIFIER") + .value("NONE", mlir::triton::CacheModifier::NONE) + .value("CA", mlir::triton::CacheModifier::CA) + .value("CG", mlir::triton::CacheModifier::CG) .export_values(); - py::enum_(m, "EVICTION_POLICY") - .value("NORMAL", ir::load_inst::NORMAL) - .value("EVICT_FIRST", ir::load_inst::EVICT_FIRST) - .value("EVICT_LAST", ir::load_inst::EVICT_LAST) + py::enum_(m, "EVICTION_POLICY") + .value("NORMAL", mlir::triton::EvictionPolicy::NORMAL) + .value("EVICT_FIRST", mlir::triton::EvictionPolicy::EVICT_FIRST) + .value("EVICT_LAST", mlir::triton::EvictionPolicy::EVICT_LAST) .export_values(); - py::enum_(m, "REDUCE_OP") - .value("ADD", ir::reduce_inst::ADD) - .value("FADD", ir::reduce_inst::FADD) - .value("MIN", ir::reduce_inst::MIN) - .value("MAX", ir::reduce_inst::MAX) - .value("UMIN", ir::reduce_inst::UMIN) - .value("UMAX", ir::reduce_inst::UMAX) - .value("ARGMIN", ir::reduce_inst::ARGMIN) - .value("ARGMAX", ir::reduce_inst::ARGMAX) - .value("ARGUMIN", ir::reduce_inst::ARGUMIN) - .value("ARGUMAX", ir::reduce_inst::ARGUMAX) - .value("FMIN", ir::reduce_inst::FMIN) - .value("FMAX", ir::reduce_inst::FMAX) - .value("ARGFMIN", ir::reduce_inst::ARGFMIN) - .value("ARGFMAX", ir::reduce_inst::ARGFMAX) - .value("XOR", ir::reduce_inst::XOR); - - py::enum_(m, "ATOMIC_OP") - .value("ADD", ir::atomic_rmw_op_t::Add) - .value("FADD", ir::atomic_rmw_op_t::FAdd) - .value("AND", ir::atomic_rmw_op_t::And) - .value("OR", ir::atomic_rmw_op_t::Or) - .value("XOR", ir::atomic_rmw_op_t::Xor) - .value("XCHG", ir::atomic_rmw_op_t::Xchg) - .value("MAX", ir::atomic_rmw_op_t::Max) - .value("MIN", ir::atomic_rmw_op_t::Min) - .value("UMIN", ir::atomic_rmw_op_t::UMin) - .value("UMAX", ir::atomic_rmw_op_t::UMax); - - py::class_(m, "context") - .def(py::init<>()); - - py::class_(m, "value") - .def("multiple_of", [](ir::value *self, std::vector val) { - if (auto *instr = dynamic_cast(self)) { - instr->set_metadata(ir::metadata::multiple_of, val); - } else - throw std::runtime_error("multiple_of"); - }) - .def("max_contiguous", [](ir::value *self, std::vector val) { - if (auto *instr = dynamic_cast(self)) { - instr->set_metadata(ir::metadata::max_contiguous, val); - } else - throw std::runtime_error("max_contiguous"); - }) - .def("set_fdiv_ieee_rounding", [](ir::value *self, bool val) { - if (auto *instr = dynamic_cast(self)) - instr->set_fdiv_ieee_rounding(val); - else - throw std::runtime_error("set_fdiv_ieee_rounding"); - }) - .def("is_phi", [](ir::value *self) { - if (auto *pn = dynamic_cast(self)) - return true; - return false; - }) - .def("ops", [](ir::value *self) { - if (auto *instr = dynamic_cast(self)) { - return instr->ops(); - } - throw std::runtime_error("cannot use ops()"); - }) - .def("replace_all_uses_with", &ir::value::replace_all_uses_with) - .def("erase_from_parent", [](ir::value *self) { - if (auto *instr = dynamic_cast(self)) - return 
instr->erase_from_parent(); - throw std::runtime_error("cannot use erase_from_parent"); - }) - .def_property("name", &ir::value::get_name, &ir::value::set_name) - .def_property_readonly("type", &ir::value::get_type); - - py::class_(m, "user"); - - py::class_(m, "constant") - .def("get_null_value", &ir::constant::get_null_value, ret::reference) - .def("get_all_ones_value", &ir::constant::get_all_ones_value, ret::reference); - - py::class_(m, "undef") - .def("get", &ir::undef_value::get, ret::reference); - - py::class_(m, "constant_int") - .def_property_readonly("value", &ir::constant_int::get_value) - .def("__int__", [](ir::constant_int *self) { return self->get_value(); }) - .def("__bool__", [](ir::constant_int *self) { return self->get_value(); }); - - py::class_(m, "constant_float") - .def_property_readonly("value", &ir::constant_fp::get_value) - .def("get", [](ir::type* ty, double val) { return ir::constant_fp::get(ty, val); }, ret::reference); - - py::class_(m, "instruction") - .def("get_parent", [](ir::instruction *self) { - return self->get_parent(); - }, ret::reference); - py::class_(m, "phi_node") - .def("add_incoming", &ir::phi_node::add_incoming); - - py::class_(m, "type") - .def("make_ptr", &ir::pointer_type::get, ret::reference) - .def("make_function", &ir::function_type::get, ret::reference) - .def("make_block", &ir::block_type::get, ret::reference) - .def("get_void", &ir::type::get_void_ty, ret::reference) - .def("get_fp8", &ir::type::get_fp8_ty, ret::reference) - .def("get_fp16", &ir::type::get_fp16_ty, ret::reference) - .def("get_bf16", &ir::type::get_bf16_ty, ret::reference) - .def("get_fp32", &ir::type::get_fp32_ty, ret::reference) - .def("get_fp64", &ir::type::get_fp64_ty, ret::reference) - .def("get_int1", &ir::type::get_int1_ty, ret::reference) - .def("get_int8", &ir::type::get_int8_ty, ret::reference) - .def("get_int16", &ir::type::get_int16_ty, ret::reference) - .def("get_int32", &ir::type::get_int32_ty, ret::reference) - .def("get_int64", &ir::type::get_int64_ty, ret::reference) - .def("get_fp_mantissa_width", &ir::type::get_fp_mantissa_width, ret::reference) - - .def("get_block_shapes", &ir::type::get_block_shapes) - - .def("is_ptr", &ir::type::is_pointer_ty) - .def("is_int", static_cast(&ir::type::is_integer_ty)) - .def("is_floating", &ir::type::is_floating_point_ty) - .def("is_block", &ir::type::is_block_ty) - .def("is_struct", &ir::type::is_struct_ty) - .def("is_void", &ir::type::is_void_ty) - .def("is_bool", &ir::type::is_bool_ty) - .def("is_fp8", &ir::type::is_fp8_ty) - .def("is_fp16", &ir::type::is_fp16_ty) - .def("is_bf16", &ir::type::is_bf16_ty) - .def("is_fp32", &ir::type::is_fp32_ty) - .def("is_fp64", &ir::type::is_fp64_ty) - .def("is_int1", [](ir::type *self) { return self->is_integer_ty(1); }) - .def("is_int8", [](ir::type *self) { return self->is_integer_ty(8); }) - .def("is_int16", [](ir::type *self) { return self->is_integer_ty(16); }) - .def("is_int32", [](ir::type *self) { return self->is_integer_ty(32); }) - .def("is_int64", [](ir::type *self) { return self->is_integer_ty(64); }) - .def("is_int_or_tileint", &ir::type::is_int_or_tileint_ty) - - .def("repr", &ir::type::repr) - .def_property_readonly("fp_mantissa_width", &ir::type::get_fp_mantissa_width) - .def_property_readonly("scalar", &ir::type::get_scalar_ty) - .def_property_readonly("context", &ir::type::get_context, ret::reference) - .def_property_readonly("int_bitwidth", &ir::type::get_integer_bitwidth) - .def_property_readonly("primitive_bitwidth", &ir::type::get_primitive_size_in_bits); - - 
py::class_(m, "pointer_type") - .def_property_readonly("element", &ir::pointer_type::get_element_ty, ret::reference) - .def_property_readonly("address_space", &ir::pointer_type::get_pointer_address_space, ret::reference); - - py::class_(m, "function_type") - .def_property_readonly("ret_ty", &ir::function_type::get_return_ty) - .def_property_readonly("arg_tys", [](ir::function_type* self){ - return std::vector(self->params_begin(), self->params_end()); + py::enum_(m, "REDUCE_OP") + .value("ADD", mlir::triton::RedOp::ADD) + .value("FADD", mlir::triton::RedOp::FADD) + .value("MIN", mlir::triton::RedOp::MIN) + .value("MAX", mlir::triton::RedOp::MAX) + .value("UMIN", mlir::triton::RedOp::UMIN) + .value("UMAX", mlir::triton::RedOp::UMAX) + .value("ARGMIN", mlir::triton::RedOp::ARGMIN) + .value("ARGMAX", mlir::triton::RedOp::ARGMAX) + .value("ARGUMIN", mlir::triton::RedOp::ARGUMIN) + .value("ARGUMAX", mlir::triton::RedOp::ARGUMAX) + .value("FMIN", mlir::triton::RedOp::FMIN) + .value("FMAX", mlir::triton::RedOp::FMAX) + .value("ARGFMIN", mlir::triton::RedOp::ARGFMIN) + .value("ARGFMAX", mlir::triton::RedOp::ARGFMAX) + .value("XOR", mlir::triton::RedOp::XOR); + + py::enum_(m, "ATOMIC_OP") + .value("ADD", mlir::triton::RMWOp::ADD) + .value("FADD", mlir::triton::RMWOp::FADD) + .value("AND", mlir::triton::RMWOp::AND) + .value("OR", mlir::triton::RMWOp::OR) + .value("XOR", mlir::triton::RMWOp::XOR) + .value("XCHG", mlir::triton::RMWOp::XCHG) + .value("MAX", mlir::triton::RMWOp::MAX) + .value("MIN", mlir::triton::RMWOp::MIN) + .value("UMIN", mlir::triton::RMWOp::UMIN) + .value("UMAX", mlir::triton::RMWOp::UMAX); + + py::class_(m, "context") + .def(py::init<>()) + .def("load_triton", [](mlir::MLIRContext &self) { + self.getOrLoadDialect(); + // we load LLVM because the frontend uses LLVM.undef for + // some placeholders + self.getOrLoadDialect(); + self.getOrLoadDialect(); + self.getOrLoadDialect(); + }); + // .def(py::init([](){ + // mlir::MLIRContext context; + // context.getOrLoadDialect(); + // // TODO: should we return a (raw/unique) pointer here? + // return context; + // })); + + // py::class_(m, "value") + // .def("multiple_of", [](ir::value *self, int val) { + // if (auto *instr = dynamic_cast(self)) { + // instr->set_metadata(ir::metadata::multiple_of, val); + // } else + // throw std::runtime_error("multiple_of"); + // }) + // .def("max_contiguous", [](ir::value *self, int val) { + // if (auto *instr = dynamic_cast(self)) { + // instr->set_metadata(ir::metadata::max_contiguous, val); + // } else + // throw std::runtime_error("max_contiguous"); + // }) + // .def("set_fdiv_ieee_rounding", [](ir::value *self, bool val) { + // if (auto *instr = dynamic_cast(self)) + // instr->set_fdiv_ieee_rounding(val); + // else + // throw std::runtime_error("set_fdiv_ieee_rounding"); + // }) + // .def("ops", [](ir::value *self) { + // if (auto *instr = dynamic_cast(self)) { + // return instr->ops(); + // } + // throw std::runtime_error("cannot use ops()"); + // }) + // .def("replace_all_uses_with", &ir::value::replace_all_uses_with) + // .def("erase_from_parent", [](ir::value *self) { + // if (auto *instr = dynamic_cast(self)) + // return instr->erase_from_parent(); + // throw std::runtime_error("cannot use erase_from_parent"); + // }) + // .def_property("name", &ir::value::get_name, &ir::value::set_name) + // .def_property_readonly("type", &ir::value::get_type); + + // // // Do we need under in TritonIR ? 
+ // // py::class_(m, "undef") + // // .def("get", &ir::undef_value::get, ret::reference); + + py::class_(m, "type") + .def("is_integer", &mlir::Type::isInteger) + .def("is_fp16", &mlir::Type::isF16) + .def("__str__", [](mlir::Type &self) { + std::string str; + llvm::raw_string_ostream os(str); + self.print(os); + return os.str(); }); - py::class_(m, "integer_type"); - - py::class_(m, "block_type") - .def_property_readonly("shape", &ir::block_type::get_shapes) - .def_property_readonly("numel", &ir::type::get_tile_num_elements); - - py::class_(m, "struct_type") - .def("get", &ir::struct_type::get, ret::reference) - .def_property_readonly("num_types", &ir::struct_type::get_num_types); - - py::class_(m, "module", py::dynamic_attr()) - .def(py::init()) - .def("has_function", &ir::module::has_function) - .def("get_function", &ir::module::get_function, ret::reference) - .def("get_functions", &ir::module::get_function_list, ret::reference) - .def("get_or_insert_function", &ir::module::get_or_insert_function, ret::reference) - .def("print", [](ir::module *self) { - self->print(std::cout); - }) - .def("reset_ret_ty", &ir::module::reset_ret_ty) - .def("set_instr_metadata", [](ir::module *self, const std::string &name, ir::value *value) { - const auto metadatas = self->get_metadatas(); - auto it = metadatas.find(name); - if (it != metadatas.end()) - if (auto *instr = dynamic_cast(value)) { - instr->set_metadata(it->second.first, it->second.second); - } - }) - .def_property_readonly("builder", &ir::module::get_builder, ret::reference); - - using eattr = ir::attribute_kind_t; - py::enum_(m, "attribute_kind") - .value("readonly", eattr::readonly) - .value("writeonly", eattr::writeonly) - .value("noalias", eattr::noalias) - .value("aligned", eattr::aligned) - .value("multiple_of", eattr::multiple_of) - .value("retune", eattr::retune) - .value("not_implemented", eattr::not_implemented); - - py::class_(m, "attribute") - .def(py::init()) - .def_property_readonly("value", &ir::attribute::get_value); - - py::class_(m, "function") - .def_property_readonly("args", &ir::function::args) - .def_property_readonly("attrs", &ir::function::attrs) - .def("set_is_kernel", &ir::function::set_is_kernel) - .def("add_attr", &ir::function::add_attr) - .def("has_attr", &ir::function::has_attr) - .def("get_attrs", &ir::function::get_attributes); - - py::class_(m, "argument") - .def_property_readonly("parent", &ir::argument::get_parent, ret::reference) - .def_property_readonly("arg_no", &ir::argument::get_arg_no); - - py::class_(m, "basic_block") - .def("create", &ir::basic_block::create, ret::reference, py::arg(), py::arg(), py::arg() = nullptr) - .def("get_predecessors", &ir::basic_block::get_predecessors, ret::reference) - .def("get_first_non_phi", [](ir::basic_block *self) -> ir::instruction* { - ir::basic_block::iterator it = self->get_first_non_phi(); - if (it == self->end()) - return nullptr; - return *it; - }, ret::reference) - .def_property_readonly("parent", &ir::basic_block::get_parent, ret::reference); - - py::class_(m, "bb_iterator"); - - py::class_(m, "builder", py::dynamic_attr()) - .def(py::init()) - // getters - .def_property_readonly("context", &ir::builder::get_context, ret::reference) - // control flow - .def("call", &ir::builder::create_call, ret::reference) - .def("launch", &ir::builder::create_launch, ret::reference) - .def("br", &ir::builder::create_br, ret::reference) - .def("cond_br", &ir::builder::create_cond_br, ret::reference) - .def("ret_void", &ir::builder::create_ret_void, ret::reference) - 
.def("ret", &ir::builder::create_ret, ret::reference) - // insertion block/point, insert points are represented as (*bb, *instr) - .def("get_insert_block", &ir::builder::get_insert_block, ret::reference) - .def("set_insert_block", (void (ir::builder::*)(ir::basic_block *)) & ir::builder::set_insert_point) - .def("get_insert_point", [](ir::builder *self) { - ir::basic_block *bb = self->get_insert_block(); - ir::basic_block::iterator it = self->get_insert_point(); - ir::instruction *instr = it == bb->end() ? nullptr : *it; - return std::make_pair(bb, instr); - }, ret::reference) - .def("set_insert_point", [](ir::builder *self, std::pair pt) { - ir::basic_block *bb = pt.first; - ir::instruction *instr = pt.second; - if (instr) { - if (bb != instr->get_parent()) - throw std::runtime_error("invalid insertion point, instr not in bb"); - self->set_insert_point(instr); - } else { - assert(bb); - self->set_insert_point(bb); - } - }) + py::class_(m, "function_type") + .def("param_types", [](mlir::FunctionType &self) { + return std::vector(self.getInputs().begin(), + self.getInputs().end()); + }); + + py::class_(m, "value") + .def("set_attr", + [](mlir::Value &self, std::string &name, + mlir::Attribute &attr) -> void { + if (mlir::Operation *definingOp = self.getDefiningOp()) + definingOp->setAttr(name, attr); + else { + /* issue a warning */ + } + }) + .def("get_context", &mlir::Value::getContext) + .def("replace_all_uses_with", + [](mlir::Value &self, mlir::Value &newValue) { + self.replaceAllUsesWith(newValue); + }); + + py::class_(m, "block_argument"); + + py::class_(m, "region") + .def("get_parent_region", &mlir::Region::getParentRegion, ret::reference) + .def("size", [](mlir::Region &self) { return self.getBlocks().size(); }) + .def("empty", &mlir::Region::empty); + + py::class_(m, "block") + .def("arg", + [](mlir::Block &self, int index) -> mlir::BlockArgument { + return self.getArgument(index); + }) + .def("get_num_arguments", &mlir::Block::getNumArguments) + .def("dump", &mlir::Block::dump) + .def("move_before", &mlir::Block::moveBefore) + .def("insert_before", &mlir::Block::insertBefore) + .def("get_parent", &mlir::Block::getParent, ret::reference) + .def("merge_block_before", + [](mlir::Block &self, mlir::Block &dst) { + // ref: RewriterBase::mergeBlocks() + if (self.getNumArguments() != 0) + throw std::runtime_error( + "This block has arguments, don't merge"); + dst.getOperations().splice(dst.begin(), self.getOperations()); + self.dropAllUses(); + self.erase(); + }) + .def("replace_use_in_block_with", [](mlir::Block &self, mlir::Value &v, + mlir::Value &newVal) { + v.replaceUsesWithIf(newVal, [&](mlir::OpOperand &operand) { + mlir::Operation *user = operand.getOwner(); + mlir::Block *currentBlock = user->getBlock(); + while (currentBlock) { + if (currentBlock == &self) + return true; + // Move up one level + currentBlock = currentBlock->getParent()->getParentOp()->getBlock(); + } + return false; + }); + }); + + // using eattr = ir::attribute_kind_t; + // py::enum_(m, "attribute_kind") + // .value("readonly", eattr::readonly) + // .value("writeonly", eattr::writeonly) + // .value("noalias", eattr::noalias) + // .value("aligned", eattr::aligned) + // .value("multiple_of", eattr::multiple_of) + // .value("retune", eattr::retune) + // .value("not_implemented", eattr::not_implemented); + + py::class_(m, "attribute"); + py::class_(m, "integer_attr"); + py::class_(m, "bool_attr"); + + // Ops + py::class_(m, "OpState") + .def("set_attr", + [](mlir::OpState &self, std::string &name, + 
mlir::Attribute &attr) -> void { self->setAttr(name, attr); }) + .def( + "get_num_results", + [](mlir::OpState &self) -> unsigned { return self->getNumResults(); }) + .def("get_result", + [](mlir::OpState &self, unsigned idx) -> mlir::Value { + return self->getResult(idx); + }) + .def( + "get_region", + [](mlir::OpState &self, unsigned idx) -> mlir::Region & { + return self->getRegion(idx); + }, + ret::reference) + .def( + "get_body", + [](mlir::scf::ForOp &self, unsigned idx) -> mlir::Block * { + return self.getBody(idx); + }, + ret::reference) + .def("dump", [](mlir::OpState &self) { self->dump(); }) + .def("__str__", + [](mlir::OpState &self) -> std::string { + std::string str; + llvm::raw_string_ostream os(str); + self->print(os); + return str; + }) + .def("append_operand", + [](mlir::OpState &self, mlir::Value &val) { + self->insertOperands(self->getNumOperands(), val); + }) + .def("verify", [](mlir::OpState &self) -> bool { + return mlir::succeeded(mlir::verify(self.getOperation())); + }); + // scf Ops + py::class_(m, "ForOp") + .def("get_induction_var", &mlir::scf::ForOp::getInductionVar); + + py::class_(m, "IfOp") + .def("get_then_block", &mlir::scf::IfOp::thenBlock, ret::reference) + .def("get_else_block", &mlir::scf::IfOp::elseBlock, ret::reference) + .def("get_then_yield", &mlir::scf::IfOp::thenYield) + .def("get_else_yield", &mlir::scf::IfOp::elseYield); + py::class_(m, "YieldOp"); + py::class_(m, "WhileOp") + .def("get_before", &mlir::scf::WhileOp::getBefore, ret::reference) + .def("get_after", &mlir::scf::WhileOp::getAfter, ret::reference); + py::class_(m, "ConditionOp"); + + // dynamic_attr is used to transfer ownership of the MLIR context to the + // module + py::class_(m, "module", py::dynamic_attr()) + .def("dump", &mlir::ModuleOp::dump) + .def("str", + [](mlir::ModuleOp &self) -> std::string { + std::string str; + llvm::raw_string_ostream os(str); + self.print(os); + return str; + }) + .def("push_back", + [](mlir::ModuleOp &self, mlir::FuncOp &funcOp) -> void { + self.push_back(funcOp); + }) + .def("has_function", + [](mlir::ModuleOp &self, std::string &funcName) -> bool { + if (self.lookupSymbol(funcName)) + return true; + return false; + }) + .def("get_function", + [](mlir::ModuleOp &self, std::string &funcName) -> mlir::FuncOp { + return self.lookupSymbol(funcName); + }) + .def("get_single_function", [](mlir::ModuleOp &self) -> mlir::FuncOp { + llvm::SmallVector funcs; + self.walk([&](mlir::FuncOp func) { funcs.push_back(func); }); + if (funcs.size() != 1) + throw std::runtime_error("Expected a single function"); + return funcs[0]; + }); + + m.def("make_attr", + [](const std::vector &values, mlir::MLIRContext &context) { + return mlir::DenseIntElementsAttr::get( + mlir::RankedTensorType::get( + {static_cast(values.size())}, + mlir::IntegerType::get(&context, 32)), + values) + .cast(); + }); + + m.def( + "parse_mlir_module", + [](const std::string &inputFilename, mlir::MLIRContext &context) { + // initialize registry + // note: we initialize llvm for undef + mlir::DialectRegistry registry; + registry.insert(); + context.appendDialectRegistry(registry); + context.loadAllAvailableDialects(); + + // parse module + mlir::OwningOpRef module( + mlir::parseSourceFile(inputFilename, &context)); + // locations are incompatible with ptx < 7.5 ! 
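The `module` methods bound above (`has_function`, `get_function`, `get_single_function`, `str`) together with `parse_mlir_module` let the Python side reload a textual MLIR module from disk and inspect it. A small usage sketch; the file path and kernel name are illustrative only:

    mod = _triton.ir.parse_mlir_module("/tmp/kernel.ttir", ctx)  # illustrative path
    assert mod.has_function("matmul_kernel")                     # illustrative name
    fn = mod.get_single_function()   # raises unless the module holds exactly one function
    print(mod.str())                 # textual IR; dump() prints to stderr instead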
+ module->walk([](mlir::Operation *op) { + op->setLoc(mlir::UnknownLoc::get(op->getContext())); + }); + if (!module) + throw std::runtime_error("Parse MLIR file failed."); + + return module->clone(); + }, + ret::take_ownership); + + py::class_(m, "function") + // .def_property_readonly("attrs", &ir::function::attrs) + // .def("add_attr", &ir::function::add_attr); + .def("args", + [](mlir::FuncOp &self, unsigned idx) -> mlir::BlockArgument { + return self.getArgument(idx); + }) + .def( + "add_entry_block", + [](mlir::FuncOp &self) -> mlir::Block * { + return self.addEntryBlock(); + }, + ret::reference) + .def( + "set_arg_attr", + [](mlir::FuncOp &self, int arg_no, const std::string &name, int val) { + // set arg attributes "name" to value "val" + auto attrTy = mlir::IntegerType::get(self.getContext(), 32); + self.setArgAttr(arg_no, name, mlir::IntegerAttr::get(attrTy, val)); + }, + ret::reference) + .def_property_readonly("type", &mlir::FuncOp::getType) + .def("reset_type", &mlir::FuncOp::setType); + + py::class_(m, "InsertPoint"); + + py::class_(m, "builder", py::dynamic_attr()) + .def(py::init()) + // // getters + .def_property_readonly("context", &mlir::OpBuilder::getContext, + ret::reference) + .def("create_module", + [](mlir::OpBuilder &self) -> mlir::ModuleOp { + auto loc = self.getUnknownLoc(); + return self.create(loc); + }) + .def("ret", + [](mlir::OpBuilder &self, std::vector &vals) -> void { + auto loc = self.getUnknownLoc(); + self.create(loc, vals); + }) + .def("call", + [](mlir::OpBuilder &self, mlir::FuncOp &func, + std::vector &args) -> mlir::OpState { + auto loc = self.getUnknownLoc(); + return self.create(loc, func, args); + }) + // insertion block/point + .def("set_insertion_point_to_start", + [](mlir::OpBuilder &self, mlir::Block &block) -> void { + self.setInsertionPointToStart(&block); + }) + .def("set_insertion_point_to_end", + [](mlir::OpBuilder &self, mlir::Block &block) { + self.setInsertionPointToEnd(&block); + }) + .def( + "get_insertion_block", + [](mlir::OpBuilder &self) -> mlir::Block * { + return self.getInsertionBlock(); + }, + ret::reference) + .def("get_insertion_point", &mlir::OpBuilder::saveInsertionPoint) + .def("restore_insertion_point", &mlir::OpBuilder::restoreInsertionPoint) + // .def("set_insert_point", [](ir::builder *self, + // std::pair pt) { + // ir::basic_block *bb = pt.first; + // ir::instruction *instr = pt.second; + // if (instr) { + // if (bb != instr->get_parent()) + // throw std::runtime_error("invalid insertion point, instr not in + // bb"); + // self->set_insert_point(instr); + // } else { + // assert(bb); + // self->set_insert_point(bb); + // } + // }) + // Attr + .def("get_bool_attr", &mlir::OpBuilder::getBoolAttr) + .def("get_int32_attr", &mlir::OpBuilder::getI32IntegerAttr) + // Use arith.ConstantOp to create constants // Constants - .def("get_int1", &ir::builder::get_int1, ret::reference) - .def("get_int32", [](ir::builder *self, int32_t v) { return self->get_int32((uint32_t)v); }, ret::reference) - .def("get_uint32", &ir::builder::get_int32, ret::reference) - .def("get_int64", [](ir::builder *self, int64_t v) { return self->get_int64((uint64_t)v); }, ret::reference) - .def("get_uint64", &ir::builder::get_int64, ret::reference) - .def("get_float16", &ir::builder::get_float16, ret::reference) - .def("get_float32", &ir::builder::get_float32, ret::reference) - .def("get_range", &ir::builder::get_range, ret::reference) + .def("get_int1", + [](mlir::OpBuilder &self, bool v) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return 
mlir::Value(self.create( + loc, v, self.getI1Type())); + }) + .def("get_int32", + [](mlir::OpBuilder &self, int64_t v) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return mlir::Value(self.create( + loc, v, self.getI32Type())); + }) + // .def("get_uint32", &ir::builder::get_int32, ret::reference) + // .def("get_int64", [](ir::builder *self, int64_t v) { return + // self->get_int64((uint64_t)v); }, ret::reference) .def("get_uint64", + // &ir::builder::get_int64, ret::reference) .def("get_float16", + // &ir::builder::get_float16, ret::reference) + .def("get_float32", + [](mlir::OpBuilder &self, float v) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, self.getF32FloatAttr(v)); + }) + .def("get_null_value", + [](mlir::OpBuilder &self, mlir::Type type) -> mlir::Value { + auto loc = self.getUnknownLoc(); + if (auto floatTy = type.dyn_cast()) + return self.create( + loc, mlir::APFloat(floatTy.getFloatSemantics(), 0), floatTy); + else if (auto intTy = type.dyn_cast()) + return self.create(loc, 0, intTy); + else + throw std::runtime_error("Not implemented"); + }) + .def("get_all_ones_value", + [](mlir::OpBuilder &self, mlir::Type type) -> mlir::Value { + auto loc = self.getUnknownLoc(); + uint64_t val = 0xFFFFFFFFFFFFFFFF; + if (auto intTy = type.dyn_cast()) + return self.create(loc, val, intTy); + else + throw std::runtime_error("Not implemented"); + }) + // Types - .def("get_void_ty", &ir::builder::get_void_ty, ret::reference) - .def("get_int1_ty", &ir::builder::get_int1_ty, ret::reference) - .def("get_int8_ty", &ir::builder::get_int8_ty, ret::reference) - .def("get_int16_ty", &ir::builder::get_int16_ty, ret::reference) - .def("get_int32_ty", &ir::builder::get_int32_ty, ret::reference) - .def("get_int64_ty", &ir::builder::get_int64_ty, ret::reference) - .def("get_fp8_ty", &ir::builder::get_fp8_ty, ret::reference) - .def("get_half_ty", &ir::builder::get_half_ty, ret::reference) - .def("get_bf16_ty", &ir::builder::get_bf16_ty, ret::reference) - .def("get_float_ty", &ir::builder::get_float_ty, ret::reference) - .def("get_double_ty", &ir::builder::get_double_ty, ret::reference) - // terminator instructions - .def("create_br", &ir::builder::create_br, ret::reference) - .def("create_cond_br", &ir::builder::create_cond_br, ret::reference) - .def("create_ret_void", &ir::builder::create_ret_void, ret::reference) - // Dequantize instructions - .def("create_dequantize", &ir::builder::create_dequantize, ret::reference) + .def("get_void_ty", + [](mlir::OpBuilder &self) -> mlir::Type { + return self.getNoneType(); + }) + .def("get_int1_ty", + [](mlir::OpBuilder &self) -> mlir::Type { + return self.getI1Type(); + }) // or ret::copy? 
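Together with the `function` binding above and the type and `get_or_insert_function` helpers bound just below, these constant and insertion-point methods are enough to assemble a skeleton kernel from Python. A minimal sketch, continuing the context created earlier; the kernel name and the "public" visibility string are illustrative:

    builder = _triton.ir.builder(ctx)        # assumes the constructor takes the MLIR context
    module = builder.create_module()

    # declare a `void add_kernel()` and give it an entry block
    fn_ty = builder.get_function_ty([], [])
    fn = builder.get_or_insert_function(module, "add_kernel", fn_ty, "public")
    module.push_back(fn)
    entry = fn.add_entry_block()
    builder.set_insertion_point_to_start(entry)

    pid = builder.create_get_program_id(0)   # i32 program id along axis 0
    i32 = builder.get_int32_ty()
    assert i32.is_integer(32) and str(i32) == "i32"
    builder.ret([])                          # empty return for a void kernel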
+ .def("get_int8_ty", + [](mlir::OpBuilder &self) -> mlir::Type { return self.getI8Type(); }) + .def("get_int16_ty", + [](mlir::OpBuilder &self) -> mlir::Type { + return self.getType(16); + }) + .def( + "get_int32_ty", + [](mlir::OpBuilder &self) -> mlir::Type { return self.getI32Type(); }) + .def( + "get_int64_ty", + [](mlir::OpBuilder &self) -> mlir::Type { return self.getI64Type(); }) + .def("get_fp8_ty", + [](mlir::OpBuilder &self) -> mlir::Type { + return self.getType(); + }) + .def( + "get_half_ty", + [](mlir::OpBuilder &self) -> mlir::Type { return self.getF16Type(); }) + .def("get_bf16_ty", + [](mlir::OpBuilder &self) -> mlir::Type { + return self.getBF16Type(); + }) + .def( + "get_float_ty", + [](mlir::OpBuilder &self) -> mlir::Type { return self.getF32Type(); }) + .def( + "get_double_ty", + [](mlir::OpBuilder &self) -> mlir::Type { return self.getF64Type(); }) + .def("get_ptr_ty", + [](mlir::OpBuilder &self, mlir::Type &type, + int addrSpace) -> mlir::Type { + return mlir::triton::PointerType::get(type, addrSpace); + }) + .def("get_block_ty", + [](mlir::OpBuilder &self, mlir::Type &elementType, + std::vector &shape) -> mlir::Type { + return mlir::RankedTensorType::get(shape, elementType); + }) + .def("get_function_ty", + [](mlir::OpBuilder &self, std::vector inTypes, + std::vector outTypes) -> mlir::Type { + return self.getFunctionType(inTypes, outTypes); + }) + + // Ops + .def("get_or_insert_function", + [](mlir::OpBuilder &self, mlir::ModuleOp &module, + std::string &funcName, mlir::Type &funcType, + std::string &visibility) -> mlir::FuncOp { + if (mlir::Operation *funcOperation = module.lookupSymbol(funcName)) + return llvm::dyn_cast(funcOperation); + auto loc = self.getUnknownLoc(); + if (auto funcTy = funcType.dyn_cast()) { + llvm::SmallVector attrs = { + mlir::NamedAttribute(self.getStringAttr("sym_visibility"), + self.getStringAttr(visibility))}; + return self.create(loc, funcName, funcTy, attrs); + } + throw std::runtime_error("invalid function type"); + }) + .def( + "create_block", + [](mlir::OpBuilder &self) -> mlir::Block * { + mlir::Region *parent = self.getBlock()->getParent(); + return self.createBlock(parent); + }, + ret::reference) + .def( + "create_block_with_parent", + [](mlir::OpBuilder &self, mlir::Region &parent, + std::vector &argTypes) -> mlir::Block * { + auto argLoc = self.getUnknownLoc(); + llvm::SmallVector argLocs(argTypes.size(), + argLoc); + return self.createBlock(&parent, {}, argTypes, argLocs); + }, + ret::reference) + .def( + "new_block", + [](mlir::OpBuilder &self) -> mlir::Block * { + return new mlir::Block(); + }, + ret::reference) + // Structured control flow + .def("create_for_op", + [](mlir::OpBuilder &self, mlir::Value &lb, mlir::Value &ub, + mlir::Value &step, + std::vector &initArgs) -> mlir::scf::ForOp { + auto loc = self.getUnknownLoc(); + return self.create(loc, lb, ub, step, initArgs); + }) + .def("create_if_op", + [](mlir::OpBuilder &self, std::vector &retTypes, + mlir::Value &condition, bool withElse) -> mlir::scf::IfOp { + auto loc = self.getUnknownLoc(); + return self.create(loc, retTypes, condition, + withElse); + }) + .def("create_yield_op", + [](mlir::OpBuilder &self, + std::vector &yields) -> mlir::scf::YieldOp { + auto loc = self.getUnknownLoc(); + return self.create(loc, yields); + }) + .def("create_while_op", + [](mlir::OpBuilder &self, std::vector &retTypes, + std::vector &initArgs) -> mlir::scf::WhileOp { + auto loc = self.getUnknownLoc(); + return self.create(loc, retTypes, initArgs); + }) + .def("create_condition_op", 
+ [](mlir::OpBuilder &self, mlir::Value &cond, + std::vector &args) -> mlir::scf::ConditionOp { + auto loc = self.getUnknownLoc(); + return self.create(loc, cond, args); + }) + + // miscellaneous + .def("create_make_range", + [](mlir::OpBuilder &self, int start, int end) -> mlir::Value { + auto loc = self.getUnknownLoc(); + auto retType = + mlir::RankedTensorType::get({end - start}, self.getI32Type()); + return self.create(loc, retType, start, + end); + }) + .def("create_get_program_id", + [](mlir::OpBuilder &self, int axis) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, self.getI32Type(), axis); + }) + // Cast instructions - .def("create_bitcast", &ir::builder::create_bitcast, ret::reference) - .def("create_cast", &ir::builder::create_cast, ret::reference) - .def("create_ptr_to_int", &ir::builder::create_ptr_to_int, ret::reference) - .def("create_si_to_fp", &ir::builder::create_si_to_fp, ret::reference) - .def("create_ui_to_fp", &ir::builder::create_ui_to_fp, ret::reference) - .def("create_fp_to_si", &ir::builder::create_fp_to_si, ret::reference) - .def("create_fp_to_ui", &ir::builder::create_fp_to_ui, ret::reference) - .def("create_fp_ext", &ir::builder::create_fp_ext, ret::reference) - .def("create_fp_trunc", &ir::builder::create_fp_trunc, ret::reference) - .def("create_int_cast", &ir::builder::create_int_cast, ret::reference) - .def("create_downcast", &ir::builder::create_downcast, ret::reference) - .def("create_int_to_ptr", &ir::builder::create_int_to_ptr, ret::reference) - .def("create_ptr_to_int", &ir::builder::create_ptr_to_int, ret::reference) - // phi - .def("create_phi", &ir::builder::create_phi, ret::reference) - // Binary instructions - .def("create_insert_nuwnswb_binop", &ir::builder::create_insert_nuwnswb_binop, ret::reference) - .def("create_fmul", &ir::builder::create_fmul, ret::reference) - .def("create_fdiv", &ir::builder::create_fdiv, ret::reference) - .def("create_frem", &ir::builder::create_frem, ret::reference) - .def("create_fadd", &ir::builder::create_fadd, ret::reference) - .def("create_fsub", &ir::builder::create_fsub, ret::reference) - .def("create_mul", &ir::builder::create_mul, ret::reference, - py::arg("lhs"), py::arg("rhs"), - py::arg("has_nuw")=false, py::arg("has_nsw")=false) - .def("create_sdiv", &ir::builder::create_sdiv, ret::reference) - .def("create_udiv", &ir::builder::create_udiv, ret::reference) - .def("create_srem", &ir::builder::create_srem, ret::reference) - .def("create_urem", &ir::builder::create_urem, ret::reference) - .def("create_add", &ir::builder::create_add, ret::reference, - py::arg("lhs"), py::arg("rhs"), - py::arg("has_nuw")=false, py::arg("has_nsw")=false) - .def("create_sub", &ir::builder::create_sub, ret::reference, - py::arg("lhs"), py::arg("rhs"), - py::arg("has_nuw")=false, py::arg("has_nsw")=false) - .def("create_shl", &ir::builder::create_shl, ret::reference, - py::arg("lhs"), py::arg("rhs"), - py::arg("has_nuw")=false, py::arg("has_nsw")=false) - .def("create_lshr", &ir::builder::create_lshr, ret::reference, - py::arg("lhs"), py::arg("rhs"), - py::arg("has_nuw")=false, py::arg("has_nsw")=false) - .def("create_ashr", &ir::builder::create_ashr, ret::reference, - py::arg("lhs"), py::arg("rhs"), - py::arg("has_nuw")=false, py::arg("has_nsw")=false) - // GEP - .def("create_gep", &ir::builder::create_gep, ret::reference) + // Conversions for custom FP types (FP8) + .def("create_fp_to_fp", + [](mlir::OpBuilder &self, mlir::Value &src, + mlir::Type &dstType) -> mlir::Value { + auto loc = 
self.getUnknownLoc(); + return self.create(loc, dstType, src); + }) + // Conversions for standard LLVM builtin types + .def("create_bitcast", + [](mlir::OpBuilder &self, mlir::Value &src, + mlir::Type &dstType) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, dstType, src); + }) + .def("create_si_to_fp", + [](mlir::OpBuilder &self, mlir::Value &src, + mlir::Type &dstType) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, dstType, src); + }) + .def("create_ui_to_fp", + [](mlir::OpBuilder &self, mlir::Value &src, + mlir::Type &dstType) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, dstType, src); + }) + .def("create_fp_to_si", + [](mlir::OpBuilder &self, mlir::Value &src, + mlir::Type &dstType) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, dstType, src); + }) + .def("create_fp_to_ui", + [](mlir::OpBuilder &self, mlir::Value &src, + mlir::Type &dstType) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, dstType, src); + }) + .def("create_fp_ext", + [](mlir::OpBuilder &self, mlir::Value &src, + mlir::Type &dstType) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, dstType, src); + }) + .def("create_fp_trunc", + [](mlir::OpBuilder &self, mlir::Value &src, + mlir::Type &dstType) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, dstType, src); + }) + .def("create_int_cast", + [](mlir::OpBuilder &self, mlir::Value &src, mlir::Type &dstType, + bool isSigned) -> mlir::Value { + auto loc = self.getUnknownLoc(); + // get element type if necessary + mlir::Type srcType = src.getType(); + auto srcTensorType = srcType.dyn_cast(); + auto dstTensorType = dstType.dyn_cast(); + mlir::Type srcEltType = srcType; + mlir::Type dstEltType = dstType; + if (dstTensorType && srcTensorType) { + dstEltType = dstTensorType.getElementType(); + srcEltType = srcTensorType.getElementType(); + } + unsigned srcWidth = srcEltType.getIntOrFloatBitWidth(); + unsigned dstWidth = dstEltType.getIntOrFloatBitWidth(); + if (srcWidth == dstWidth) + return self.create(loc, dstType, src); + else if (srcWidth > dstWidth) + return self.create(loc, dstType, src); + else if (isSigned) + return self.create(loc, dstType, src); + else + return self.create(loc, dstType, src); + }) + .def("create_to_index", + [](mlir::OpBuilder &self, mlir::Value &input) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, input, + self.getIndexType()); + }) + .def("create_index_to_si", + [](mlir::OpBuilder &self, mlir::Value &input) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, input, + self.getI32Type()); + }) + .def("create_fmul", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, lhs, rhs); + }) + .def("create_fdiv", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, lhs, rhs); + }) + .def("create_frem", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, lhs, rhs); + }) + .def("create_fadd", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, lhs, rhs); + }) + .def("create_fsub", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value 
&rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, lhs, rhs); + }) + .def("create_mul", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, lhs, rhs); + }) + .def("create_sdiv", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, lhs, rhs); + }) + .def("create_udiv", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, lhs, rhs); + }) + .def("create_srem", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, lhs, rhs); + }) + .def("create_urem", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, lhs, rhs); + }) + .def("create_add", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, lhs, rhs); + }) + .def("create_sub", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return mlir::Value( + self.create(loc, lhs, rhs)); + }) + .def("create_shl", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return mlir::Value( + self.create(loc, lhs, rhs)); + }) + .def("create_lshr", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return mlir::Value( + self.create(loc, lhs, rhs)); + }) + .def("create_ashr", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return mlir::Value( + self.create(loc, lhs, rhs)); + }) + // AddPtr (similar to GEP) + .def("create_addptr", + [](mlir::OpBuilder &self, mlir::Value &ptr, + mlir::Value &offset) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, ptr.getType(), ptr, + offset); + }) // Comparison (int) - .def("create_icmp", &ir::builder::create_icmp, ret::reference) - .def("create_icmpSLE", &ir::builder::create_icmpSLE, ret::reference) - .def("create_icmpSLT", &ir::builder::create_icmpSLT, ret::reference) - .def("create_icmpSGE", &ir::builder::create_icmpSGE, ret::reference) - .def("create_icmpSGT", &ir::builder::create_icmpSGT, ret::reference) - .def("create_icmpULE", &ir::builder::create_icmpULE, ret::reference) - .def("create_icmpULT", &ir::builder::create_icmpULT, ret::reference) - .def("create_icmpUGE", &ir::builder::create_icmpUGE, ret::reference) - .def("create_icmpUGT", &ir::builder::create_icmpUGT, ret::reference) - .def("create_icmpEQ", &ir::builder::create_icmpEQ, ret::reference) - .def("create_icmpNE", &ir::builder::create_icmpNE, ret::reference) + .def("create_icmpSLE", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, mlir::arith::CmpIPredicate::sle, lhs, rhs); + }) + .def("create_icmpSLT", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, mlir::arith::CmpIPredicate::slt, lhs, rhs); + }) + .def("create_icmpSGE", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value 
{ + auto loc = self.getUnknownLoc(); + return self.create( + loc, mlir::arith::CmpIPredicate::sge, lhs, rhs); + }) + .def("create_icmpSGT", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, mlir::arith::CmpIPredicate::sgt, lhs, rhs); + }) + .def("create_icmpULE", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, mlir::arith::CmpIPredicate::ule, lhs, rhs); + }) + .def("create_icmpULT", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, mlir::arith::CmpIPredicate::ult, lhs, rhs); + }) + .def("create_icmpUGE", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, mlir::arith::CmpIPredicate::uge, lhs, rhs); + }) + .def("create_icmpUGT", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, mlir::arith::CmpIPredicate::ugt, lhs, rhs); + }) + .def("create_icmpEQ", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, mlir::arith::CmpIPredicate::eq, lhs, rhs); + }) + .def("create_icmpNE", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, mlir::arith::CmpIPredicate::ne, lhs, rhs); + }) // Comparison (float) - .def("create_fcmp", &ir::builder::create_fcmp, ret::reference) - .def("create_fcmpOLT", &ir::builder::create_fcmpOLT, ret::reference) - .def("create_fcmpOGT", &ir::builder::create_fcmpOGT, ret::reference) - .def("create_fcmpOLE", &ir::builder::create_fcmpOLE, ret::reference) - .def("create_fcmpOGE", &ir::builder::create_fcmpOGE, ret::reference) - .def("create_fcmpOEQ", &ir::builder::create_fcmpOEQ, ret::reference) - .def("create_fcmpONE", &ir::builder::create_fcmpONE, ret::reference) - .def("create_fcmpULT", &ir::builder::create_fcmpULT, ret::reference) - .def("create_fcmpUGT", &ir::builder::create_fcmpUGT, ret::reference) - .def("create_fcmpULE", &ir::builder::create_fcmpULE, ret::reference) - .def("create_fcmpUGE", &ir::builder::create_fcmpUGE, ret::reference) - .def("create_fcmpUEQ", &ir::builder::create_fcmpUEQ, ret::reference) - .def("create_fcmpUNE", &ir::builder::create_fcmpUNE, ret::reference) - // Logical - .def("create_and", &ir::builder::create_and, ret::reference) - .def("create_xor", &ir::builder::create_xor, ret::reference) - .def("create_or", &ir::builder::create_or, ret::reference) + .def("create_fcmpOLT", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, mlir::arith::CmpFPredicate::OLT, lhs, rhs); + }) + .def("create_fcmpOGT", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, mlir::arith::CmpFPredicate::OGT, lhs, rhs); + }) + .def("create_fcmpOLE", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, mlir::arith::CmpFPredicate::OLE, lhs, rhs); + }) + .def("create_fcmpOGE", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> 
mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, mlir::arith::CmpFPredicate::OGE, lhs, rhs); + }) + .def("create_fcmpOEQ", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, mlir::arith::CmpFPredicate::OEQ, lhs, rhs); + }) + .def("create_fcmpONE", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, mlir::arith::CmpFPredicate::ONE, lhs, rhs); + }) + .def("create_fcmpULT", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, mlir::arith::CmpFPredicate::ULT, lhs, rhs); + }) + .def("create_fcmpUGT", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, mlir::arith::CmpFPredicate::UGT, lhs, rhs); + }) + .def("create_fcmpULE", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, mlir::arith::CmpFPredicate::ULE, lhs, rhs); + }) + .def("create_fcmpUGE", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, mlir::arith::CmpFPredicate::UGE, lhs, rhs); + }) + .def("create_fcmpUEQ", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, mlir::arith::CmpFPredicate::UEQ, lhs, rhs); + }) + .def("create_fcmpUNE", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, mlir::arith::CmpFPredicate::UNE, lhs, rhs); + }) + // // Logical + .def("create_and", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, lhs, rhs); + }) + .def("create_xor", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, lhs, rhs); + }) + .def("create_or", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, lhs, rhs); + }) // Input/Output - .def("create_load", &ir::builder::create_load, ret::reference) - .def("create_store", &ir::builder::create_store, ret::reference) - .def("create_masked_load", &ir::builder::create_masked_load, ret::reference) - .def("create_masked_store", &ir::builder::create_masked_store, ret::reference) - // Block instruction - .def("create_splat", &ir::builder::create_splat, ret::reference) - .def("create_reshape", &ir::builder::create_reshape, ret::reference) - .def("create_cat", &ir::builder::create_cat, ret::reference) - .def("create_broadcast", &ir::builder::create_broadcast, ret::reference) - // atomic - .def("create_atomic_cas", &ir::builder::create_atomic_cas, ret::reference) - .def("create_atomic_rmw", &ir::builder::create_atomic_rmw, ret::reference) - // Utilities - .def("create_clock", &ir::builder::create_clock, ret::reference) - .def("create_globaltimer", &ir::builder::create_globaltimer, ret::reference) - // Extern instruction - .def("create_extern_elementwise", &ir::builder::create_extern_elementwise, ret::reference) + .def("create_load", + [](mlir::OpBuilder &self, 
mlir::Value &ptrs, + mlir::triton::CacheModifier cacheModifier, + mlir::triton::EvictionPolicy evictionPolicy, + bool isVolatile) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, ptrs, cacheModifier, evictionPolicy, isVolatile); + }) + .def("create_store", + [](mlir::OpBuilder &self, mlir::Value &ptrs, + mlir::Value &value) -> void { + auto loc = self.getUnknownLoc(); + self.create(loc, ptrs, value); + }) + .def("create_masked_load", + [](mlir::OpBuilder &self, mlir::Value &ptrs, mlir::Value &mask, + std::optional &other, + mlir::triton::CacheModifier cacheModifier, + mlir::triton::EvictionPolicy evictionPolicy, + bool isVolatile) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, ptrs, mask, other.value_or(mlir::Value()), cacheModifier, + evictionPolicy, isVolatile); + }) + .def("create_masked_store", + [](mlir::OpBuilder &self, mlir::Value &ptrs, mlir::Value &val, + mlir::Value &mask) -> void { + auto loc = self.getUnknownLoc(); + self.create(loc, ptrs, val, mask); + }) + .def("create_view", + [](mlir::OpBuilder &self, mlir::Value &arg, + std::vector &shape) -> mlir::Value { + auto loc = self.getUnknownLoc(); + auto argType = arg.getType() + .dyn_cast() + .getElementType(); + return self.create( + loc, mlir::RankedTensorType::get(shape, argType), arg); + }) + .def( + "create_expand_dims", + [](mlir::OpBuilder &self, mlir::Value &arg, int axis) -> mlir::Value { + auto loc = self.getUnknownLoc(); + auto argType = arg.getType().dyn_cast(); + auto argEltType = argType.getElementType(); + std::vector retShape = argType.getShape(); + retShape.insert(retShape.begin() + axis, 1); + return self.create( + loc, mlir::RankedTensorType::get(retShape, argEltType), arg, + axis); + }) + .def("create_cat", + [](mlir::OpBuilder &self, mlir::Value &lhs, + mlir::Value &rhs) -> mlir::Value { + auto loc = self.getUnknownLoc(); + auto lhsType = lhs.getType().dyn_cast(); + auto rhsType = rhs.getType().dyn_cast(); + if (!(lhsType.getShape().size() == 1 && + rhsType.getShape().size() == 1)) + throw std::runtime_error( + "shape not supported by cat. 
Expecting rank-1 inputs"); + std::vector shape{lhsType.getShape()[0] + + rhsType.getShape()[0]}; + return self.create( + loc, + mlir::RankedTensorType::get(shape, lhsType.getElementType()), + lhs, rhs); + }) + .def("create_trans", + [](mlir::OpBuilder &self, mlir::Value &arg) -> mlir::Value { + auto loc = self.getUnknownLoc(); + auto argType = arg.getType().dyn_cast(); + auto argEltType = argType.getElementType(); + std::vector retShape = argType.getShape(); + std::reverse(retShape.begin(), retShape.end()); + return self.create( + loc, mlir::RankedTensorType::get(retShape, argEltType), arg); + }) + .def("create_broadcast", + [](mlir::OpBuilder &self, mlir::Value &arg, + std::vector &shape) -> mlir::Value { + auto loc = self.getUnknownLoc(); + if (auto argType = + arg.getType().dyn_cast()) + return self.createOrFold( + loc, + mlir::RankedTensorType::get(shape, argType.getElementType()), + arg); + throw std::runtime_error( + "arg is not of RankedTensorType, use create_splat"); + }) + .def("create_splat", + [](mlir::OpBuilder &self, mlir::Value &arg, + std::vector &shape) -> mlir::Value { + auto loc = self.getUnknownLoc(); + auto argType = arg.getType(); + auto ret = self.createOrFold( + loc, mlir::RankedTensorType::get(shape, argType), arg); + return ret; + }) + // // atomic + .def("create_atomic_cas", + [](mlir::OpBuilder &self, mlir::Value &ptr, mlir::Value &cmp, + mlir::Value &val) -> mlir::Value { + auto loc = self.getUnknownLoc(); + mlir::Type dstType; + if (auto srcTensorType = + ptr.getType().dyn_cast()) { + mlir::Type dstElemType = srcTensorType.getElementType() + .cast() + .getPointeeType(); + dstType = mlir::RankedTensorType::get(srcTensorType.getShape(), + dstElemType); + } else { + auto ptrType = mlir::getElementTypeOrSelf(ptr) + .cast(); + dstType = ptrType.getPointeeType(); + } + return self.create(loc, dstType, ptr, + cmp, val); + }) + .def("create_atomic_rmw", + [](mlir::OpBuilder &self, mlir::triton::RMWOp rmwOp, + mlir::Value &ptr, mlir::Value &val, + mlir::Value &mask) -> mlir::Value { + auto loc = self.getUnknownLoc(); + mlir::Type dstType; + if (auto srcTensorType = + ptr.getType().dyn_cast()) { + mlir::Type dstElemType = srcTensorType.getElementType() + .cast() + .getPointeeType(); + dstType = mlir::RankedTensorType::get(srcTensorType.getShape(), + dstElemType); + } else { + auto ptrType = mlir::getElementTypeOrSelf(ptr) + .cast(); + dstType = ptrType.getPointeeType(); + } + return self.create(loc, dstType, rmwOp, + ptr, val, mask); + }) + // External + .def("create_external_elementwise", + [](mlir::OpBuilder &self, const std::string &libName, + const std::string &libPath, const std::string &symbol, + std::vector &argList, + mlir::Type retType) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, retType, argList, libName, libPath, symbol); + }) // Built-in instruction - .def("create_get_program_id", &ir::builder::create_get_program_id, ret::reference) - .def("create_get_num_programs", &ir::builder::create_get_num_programs, ret::reference) - .def("create_exp", &ir::builder::create_exp, ret::reference) - .def("create_cos", &ir::builder::create_cos, ret::reference) - .def("create_sin", &ir::builder::create_sin, ret::reference) - .def("create_log", &ir::builder::create_log, ret::reference) - .def("create_dot", &ir::builder::create_dot, ret::reference) - .def("create_trans", &ir::builder::create_trans, ret::reference) - .def("create_sqrt", &ir::builder::create_sqrt, ret::reference) - .def("create_reduce", &ir::builder::create_reduce, 
ret::reference) - .def("create_select", &ir::builder::create_select, ret::reference) - // struct - .def("insert_value", &ir::builder::create_insert_value, ret::reference) - .def("extract_value", &ir::builder::create_extract_value, ret::reference) - // Intrinsics - // These have no place in the IR, and hopefully they can be removed at some point - .def("create_umulhi", &ir::builder::create_umulhi, ret::reference) - .def("create_copy_to_shared", &ir::builder::create_copy_to_shared, ret::reference) - .def("create_masked_load_async", &ir::builder::create_masked_load_async, ret::reference) - .def("create_copy_from_shared", &ir::builder::create_copy_from_shared, ret::reference) - .def("create_barrier", &ir::builder::create_barrier, ret::reference) - .def("create_async_wait", &ir::builder::create_async_wait, ret::reference) - .def("create_prefetch_s", &ir::builder::create_prefetch_s, ret::reference); + .def("create_get_program_id", + [](mlir::OpBuilder &self, int axis) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, self.getI32Type(), self.getI32IntegerAttr(axis)); + }) + .def("create_get_num_programs", + [](mlir::OpBuilder &self, int axis) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create( + loc, self.getI32Type(), self.getI32IntegerAttr(axis)); + }) + .def("create_dot", + [](mlir::OpBuilder &self, mlir::Value &a, mlir::Value &b, + mlir::Value &c, bool allowTF32) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, c.getType(), a, b, c, + allowTF32); + }) + .def("create_exp", + [](mlir::OpBuilder &self, mlir::Value &val) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, val); + }) + .def("create_cos", + [](mlir::OpBuilder &self, mlir::Value &val) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, val); + }) + .def("create_sin", + [](mlir::OpBuilder &self, mlir::Value &val) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, val); + }) + .def("create_log", + [](mlir::OpBuilder &self, mlir::Value &val) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, val); + }) + .def("create_sqrt", + [](mlir::OpBuilder &self, mlir::Value &val) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, val); + }) + .def("create_reduce", + [](mlir::OpBuilder &self, mlir::Value &operand, + mlir::triton::RedOp redOp, int axis) -> mlir::Value { + auto loc = self.getUnknownLoc(); + auto inputTensorType = + operand.getType().dyn_cast(); + std::vector shape = inputTensorType.getShape(); + shape.erase(shape.begin() + axis); + bool withIndex = mlir::triton::ReduceOp::withIndex(redOp); + mlir::Type resType = withIndex ? 
self.getI32Type() + : inputTensorType.getElementType(); + if (!shape.empty()) { + resType = mlir::RankedTensorType::get(shape, resType); + } + return self.create(loc, resType, redOp, + operand, axis); + }) + .def("create_ptr_to_int", + [](mlir::OpBuilder &self, mlir::Value &val, + mlir::Type &type) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, type, val); + }) + .def("create_int_to_ptr", + [](mlir::OpBuilder &self, mlir::Value &val, + mlir::Type &type) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, type, val); + }) + .def("create_select", + [](mlir::OpBuilder &self, mlir::Value &condition, + mlir::Value &trueValue, mlir::Value &falseValue) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create(loc, condition, trueValue, + falseValue); + }) + .def("create_printf", + [](mlir::OpBuilder &self, const std::string &prefix, + const std::vector &values) -> void { + auto loc = self.getUnknownLoc(); + self.create( + loc, + mlir::StringAttr::get(self.getContext(), + llvm::StringRef(prefix)), + values); + }) + // Undef + .def("create_undef", + [](mlir::OpBuilder &self, mlir::Type &type) -> mlir::Value { + auto loc = self.getUnknownLoc(); + return self.create<::mlir::LLVM::UndefOp>(loc, type); + }) + // Force GPU barrier + .def("create_barrier", [](mlir::OpBuilder &self) { + auto loc = self.getUnknownLoc(); + self.create(loc); + }); + + py::class_(m, "pass_manager") + .def(py::init()) + .def("enable_debug", + [](mlir::PassManager &self) { + auto printingFlags = mlir::OpPrintingFlags(); + printingFlags.elideLargeElementsAttrs(16); + self.enableIRPrinting( + /*shouldPrintBeforePass=*/nullptr, + /*shouldPrintAfterPass=*/ + [](mlir::Pass *pass, mlir::Operation *) { + return ::triton::tools::getBoolEnv("MLIR_ENABLE_DUMP"); + }, + /*printModuleScope=*/false, + /*printAfterOnlyOnChange=*/true, + /*printAfterOnlyOnFailure*/ false, llvm::dbgs(), + printingFlags); + }) + .def("run", + [](mlir::PassManager &self, mlir::ModuleOp &mod) { + // TODO: maybe dump module to file and print error for better + // diagnostics + if (mlir::failed(self.run(mod.getOperation()))) + throw std::runtime_error("PassManager::run failed"); + }) + .def( + "add_sccp_pass", + [](mlir::PassManager &self) { self.addPass(mlir::createSCCPPass()); }) + .def("add_coalesce_pass", + [](mlir::PassManager &self) { + self.addPass(mlir::createTritonGPUCoalescePass()); + }) + .def("add_symbol_dce_pass", + [](mlir::PassManager &self) { + self.addPass(mlir::createSymbolDCEPass()); + }) + .def("add_inliner_pass", + [](mlir::PassManager &self) { + self.addPass(mlir::createInlinerPass()); + }) + .def("add_canonicalizer_pass", + [](mlir::PassManager &self) { + self.addPass(mlir::createCanonicalizerPass()); + }) + .def("add_cse_pass", + [](mlir::PassManager &self) { self.addPass(mlir::createCSEPass()); }) + .def("add_licm_pass", + [](mlir::PassManager &self) { + self.addPass(mlir::createLoopInvariantCodeMotionPass()); + }) + .def("add_triton_combine_pass", + [](mlir::PassManager &self) { + self.addPass(mlir::triton::createCombineOpsPass()); + }) + .def("add_convert_triton_to_tritongpu_pass", + [](mlir::PassManager &self, int numWarps) { + self.addPass( + mlir::triton::createConvertTritonToTritonGPUPass(numWarps)); + }) + .def("add_tritongpu_pipeline_pass", + [](mlir::PassManager &self, int numStages) { + self.addPass(mlir::createTritonGPUPipelinePass(numStages)); + }) + .def("add_tritongpu_prefetch_pass", + [](mlir::PassManager &self) { + 
self.addPass(mlir::createTritonGPUPrefetchPass()); + }) + .def("add_triton_gpu_combine_pass", + [](mlir::PassManager &self, int computeCapability) { + self.addPass( + mlir::createTritonGPUCombineOpsPass(computeCapability)); + }) + .def("add_triton_gpu_to_llvm", + [](mlir::PassManager &self) { + self.addPass(mlir::triton::createConvertTritonGPUToLLVMPass()); + }) + .def("add_scf_to_cfg", [](mlir::PassManager &self) { + self.addPass(mlir::createLowerToCFGPass()); + }); +} + +void init_triton_translation(py::module &m) { + using ret = py::return_value_policy; + + m.def("get_shared_memory_size", [](mlir::ModuleOp mod) { + auto shared = mod->getAttrOfType("triton_gpu.shared"); + return shared.getInt(); + }); + + m.def( + "translate_triton_gpu_to_llvmir", + [](mlir::ModuleOp op, int computeCapability) { + llvm::LLVMContext llvmContext; + auto llvmModule = ::mlir::triton::translateTritonGPUToLLVMIR( + &llvmContext, op, computeCapability); + if (!llvmModule) + llvm::report_fatal_error("Failed to translate TritonGPU to LLVM IR."); + + std::string str; + llvm::raw_string_ostream os(str); + llvmModule->print(os, nullptr); + os.flush(); + return str; + }, + ret::take_ownership); + + m.def( + "translate_llvmir_to_ptx", + [](const std::string llvmIR, int capability, int version) -> std::string { + // create LLVM module from C++ + llvm::LLVMContext context; + std::unique_ptr buffer = + llvm::MemoryBuffer::getMemBuffer(llvmIR.c_str()); + llvm::SMDiagnostic error; + std::unique_ptr module = + llvm::parseIR(buffer->getMemBufferRef(), error, context); + if (!module) { + llvm::report_fatal_error( + "failed to parse IR: " + error.getMessage() + + "lineno: " + std::to_string(error.getLineNo())); + } + + // translate module to PTX + auto ptxCode = + triton::translateLLVMIRToPTX(*module, capability, version); + return ptxCode; + }, + ret::take_ownership); + + m.def("compile_ptx_to_cubin", + [](const std::string &ptxCode, const std::string &ptxasPath, + int capability) -> py::object { + py::gil_scoped_release allow_threads; + + // compile ptx with ptxas + llvm::SmallString<64> fsrc; + llvm::SmallString<64> flog; + llvm::sys::fs::createTemporaryFile("compile-ptx-src", "", fsrc); + llvm::sys::fs::createTemporaryFile("compile-ptx-log", "", flog); + std::string fbin = std::string(fsrc) + ".o"; + llvm::FileRemover srcRemover(fsrc); + llvm::FileRemover logRemover(flog); + llvm::FileRemover binRemover(fbin); + const char *_fsrc = fsrc.c_str(); + const char *_flog = flog.c_str(); + const char *_fbin = fbin.c_str(); + std::ofstream ofs(_fsrc); + ofs << ptxCode << std::endl; + ofs.close(); + std::string cmd; + int err; + cmd = ptxasPath + " -v --gpu-name=sm_" + std::to_string(capability) + + " " + _fsrc + " -o " + _fsrc + ".o 2> " + _flog; + err = system(cmd.c_str()); + if (err != 0) { + std::ifstream _log(_flog); + std::string log(std::istreambuf_iterator(_log), {}); + throw std::runtime_error("Internal Triton PTX codegen error: \n" + + log); + } + std::ifstream _cubin(_fbin, std::ios::binary); + std::string cubin(std::istreambuf_iterator(_cubin), {}); + _cubin.close(); + py::bytes bytes(cubin); + return std::move(bytes); + }); + + m.def("add_external_libs", + [](mlir::ModuleOp &op, const std::vector &names, + const std::vector &paths) { + ::mlir::triton::addExternalLibs(op, names, paths); + }); } void init_triton(py::module &m) { py::module subm = m.def_submodule("triton"); - init_triton_codegen(std::move(subm.def_submodule("code_gen"))); - init_triton_runtime(std::move(subm.def_submodule("runtime"))); - 
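The `pass_manager` methods above and the translation entry points registered in `init_triton_translation` are what the new Python compiler driver chains together: lower `ttir` to `ttgir`, translate to LLVM IR, then to PTX, and finally to a cubin via `ptxas`. A rough sketch of that flow; the pass ordering, example values and `ptxas` path are illustrative, not the exact pipeline used by the driver:

    num_warps, num_stages = 4, 3                   # illustrative launch configuration
    compute_capability, ptx_version = 80, 75       # illustrative sm_80 / PTX ISA 7.5 target

    pm = _triton.ir.pass_manager(ctx)              # assumes the constructor takes the MLIR context
    pm.enable_debug()                              # IR dumping gated on MLIR_ENABLE_DUMP
    pm.add_inliner_pass()
    pm.add_triton_combine_pass()
    pm.add_canonicalizer_pass()
    pm.add_convert_triton_to_tritongpu_pass(num_warps)
    pm.add_tritongpu_pipeline_pass(num_stages)
    pm.add_triton_gpu_to_llvm()
    pm.run(module)                                 # mutates the module in place; raises on failure

    llvm_ir = _triton.translate_triton_gpu_to_llvmir(module, compute_capability)
    ptx = _triton.translate_llvmir_to_ptx(llvm_ir, compute_capability, ptx_version)
    cubin = _triton.compile_ptx_to_cubin(ptx, "/usr/local/cuda/bin/ptxas", compute_capability)
    smem = _triton.get_shared_memory_size(module)  # reads the `triton_gpu.shared` attribute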
init_triton_ir(std::move(subm.def_submodule("ir"))); + // init_triton_codegen(subm.def_submodule("code_gen")); + init_triton_runtime(subm.def_submodule("runtime")); + init_triton_ir(subm.def_submodule("ir")); + init_triton_translation(subm); } diff --git a/python/test/unit/language/printf_helper.py b/python/test/unit/language/printf_helper.py new file mode 100644 index 000000000000..22e1350f1642 --- /dev/null +++ b/python/test/unit/language/printf_helper.py @@ -0,0 +1,56 @@ +import torch +from torch.testing import assert_close + +import triton +import triton.language as tl + +torch_type = { + "bool": torch.bool, + 'int8': torch.int8, + 'uint8': torch.uint8, + 'int16': torch.int16, + "int32": torch.int32, + 'int64': torch.long, + 'float16': torch.float16, + 'bfloat16': torch.bfloat16, + "float32": torch.float32, + "float64": torch.float64 +} + + +def get_tensor(shape, data_type, b_positive=False): + x = None + if data_type.startswith('int'): + x = torch.arange(0, shape[0], dtype=torch_type[data_type], device='cuda') + else: + x = torch.arange(0, shape[0], dtype=torch_type[data_type], device='cuda') + + return x + +# @pytest.mark.parametrize('data_type', +# [("int8"), +# ('int16'), +# ('int32'), +# ("int64"), +# ('float16'), +# ("float32"), +# ("float64")]) + + +def printf(data_type): + @triton.jit + def kernel(X, Y, BLOCK: tl.constexpr): + x = tl.load(X + tl.arange(0, BLOCK)) + tl.printf("", x) + tl.store(Y + tl.arange(0, BLOCK), x) + + shape = (128, ) + # limit the range of integers so that the sum does not overflow + x = get_tensor(shape, data_type) + y = torch.zeros(shape, dtype=x.dtype, device="cuda") + kernel[(1,)](x, y, BLOCK=shape[0]) + assert_close(y, x) + + +printf("float16") +printf("int8") diff --git a/python/test/unit/language/test_core.py b/python/test/unit/language/test_core.py index 5231a5bfae17..203995bc8d2f 100644 --- a/python/test/unit/language/test_core.py +++ b/python/test/unit/language/test_core.py @@ -1,5 +1,6 @@ # flake8: noqa: F821,F841 import itertools +import os import re from typing import Optional, Union @@ -104,8 +105,8 @@ def check_type_supported(dtype): ''' skip test if dtype is not supported on the current device ''' - cc = _triton.runtime.cc(_triton.runtime.backend.CUDA, torch.cuda.current_device()) - if cc < 80 and (dtype is tl.bfloat16 or dtype == "bfloat16" or dtype is torch.bfloat16): + cc = torch.cuda.get_device_capability() + if cc[0] < 8 and (dtype is tl.bfloat16 or dtype == "bfloat16" or dtype is torch.bfloat16): pytest.skip("bfloat16 is only supported on NVGPU with cc >= 80") @@ -414,8 +415,8 @@ def where_kernel(cond_ptr, a_ptr, b_ptr, output_ptr, n_elements, def test_where_broadcast(): @triton.jit def where_kernel(cond_ptr, a_ptr, out_ptr, BLOCK_SIZE: tl.constexpr): - xoffsets = tl.reshape(tl.arange(0, BLOCK_SIZE), [BLOCK_SIZE, 1]) - yoffsets = tl.reshape(tl.arange(0, BLOCK_SIZE), [1, BLOCK_SIZE]) + xoffsets = tl.arange(0, BLOCK_SIZE)[:, None] + yoffsets = tl.arange(0, BLOCK_SIZE)[None, :] mask = tl.load(cond_ptr + yoffsets) vals = tl.load(a_ptr + yoffsets + BLOCK_SIZE * xoffsets) @@ -424,8 +425,8 @@ def where_kernel(cond_ptr, a_ptr, out_ptr, BLOCK_SIZE: tl.constexpr): @triton.jit def where_scalar_condition(a_ptr, out_ptr, BLOCK_SIZE: tl.constexpr): - xoffsets = tl.reshape(tl.arange(0, BLOCK_SIZE), [BLOCK_SIZE, 1]) - yoffsets = tl.reshape(tl.arange(0, BLOCK_SIZE), [1, BLOCK_SIZE]) + xoffsets = tl.arange(0, BLOCK_SIZE)[:, None] + yoffsets = tl.arange(0, BLOCK_SIZE)[None, :] mask = 0 vals = tl.load(a_ptr + yoffsets + BLOCK_SIZE * xoffsets) res = 
tl.where(mask, vals, 0.) @@ -462,9 +463,6 @@ def test_unary_op(dtype_x, expr, device='cuda'): # ---------------- # test math ops # ---------------- -# @pytest.mark.parametrize("expr", [ -# 'exp', 'log', 'cos', 'sin' -# ]) @pytest.mark.parametrize("expr", [ @@ -490,9 +488,13 @@ def make_ptr_str(name, shape): return f"{name} + {' + '.join(offsets)}" +# TODO: handle `%4 = triton_gpu.convert_layout %3 : (tensor<32xi32, #blocked0>) -> tensor<32xi32, #triton_gpu.slice<{dim = 0, parent = #blocked1}>>`` @pytest.mark.parametrize("expr, dtype_str", [ (f'x[{s}]', d) - for s in ['None, :', ':, None', 'None, :, :', ':, :, None'] + for s in ['None, :', ':, None'] + # FIXME: 3d indexing doesn't work + #'None, :, :', + # ':, :, None'] for d in ['int32', 'uint32', 'uint16'] ]) def test_index1d(expr, dtype_str, device='cuda'): @@ -605,8 +607,8 @@ def without_fn(X, Y, A, B, C): ] for mode in ['all_neg', 'all_pos', 'min_neg', 'max_pos']])) def test_atomic_rmw(op, dtype_x_str, mode, device='cuda'): - cc = _triton.runtime.cc(_triton.runtime.backend.CUDA, torch.cuda.current_device()) - if cc < 70: + capability = torch.cuda.get_device_capability() + if capability[0] < 7: if dtype_x_str == 'float16': pytest.skip("Only test atomic float16 ops on devices with sm >= 70") n_programs = 5 @@ -651,9 +653,10 @@ def kernel(X, Z): np.testing.assert_allclose(z_ref, to_numpy(z_tri), rtol=0.01) -@pytest.mark.parametrize("axis", [0, 1]) -def test_tensor_atomic_rmw(axis, device="cuda"): - shape0, shape1 = 8, 8 +@pytest.mark.parametrize("shape, axis", + [(shape, axis) for shape in [(2, 2), (2, 8), (8, 2), (8, 8), (32, 32)] for axis in [0, 1]]) +def test_tensor_atomic_rmw(shape, axis, device="cuda"): + shape0, shape1 = shape # triton kernel @triton.jit @@ -662,14 +665,18 @@ def kernel(Z, X, AXIS: tl.constexpr, SHAPE0: tl.constexpr, SHAPE1: tl.constexpr) off1 = tl.arange(0, SHAPE1) x = tl.load(X + off0[:, None] * SHAPE1 + off1[None, :]) z = tl.sum(x, axis=AXIS) - tl.atomic_add(Z + off0, z) + if AXIS == 1: + tl.atomic_add(Z + off0, z) + else: + tl.atomic_add(Z + off1, z) rs = RandomState(17) x = numpy_random((shape0, shape1), dtype_str="float32", rs=rs) # reference result - z_ref = np.sum(x, axis=axis) + z_ref = np.sum(x, axis=axis, keepdims=False) # triton result x_tri = to_triton(x, device=device) - z_tri = to_triton(np.zeros((shape0,), dtype="float32"), device=device) + z_shape = (shape0, ) if axis == 1 else (shape1, ) + z_tri = to_triton(np.zeros(z_shape, dtype="float32"), device=device) kernel[(1,)](z_tri, x_tri, axis, shape0, shape1) np.testing.assert_allclose(z_ref, to_numpy(z_tri), rtol=1e-4) @@ -724,6 +731,10 @@ def serialized_add(data, Lock): (f'int{x}', f'uint{x}', True) for x in [8, 16, 32, 64] ]) def test_cast(dtype_x, dtype_z, bitcast, device='cuda'): + # bfloat16 on cc < 80 will not be tested + check_type_supported(dtype_x) + check_type_supported(dtype_z) + # This is tricky because numpy doesn't have bfloat, and torch doesn't have uints. 
x0 = 43 if dtype_x in int_dtypes else 43.5 if dtype_x in float_dtypes and dtype_z == 'int1': @@ -737,9 +748,11 @@ def test_cast(dtype_x, dtype_z, bitcast, device='cuda'): # triton kernel @triton.jit def kernel(X, Z, BITCAST: tl.constexpr): - x = tl.load(X) + x_ptr = X + tl.arange(0, 1) + z_ptr = Z + tl.arange(0, 1) + x = tl.load(x_ptr) z = x.to(Z.dtype.element_ty, bitcast=BITCAST) - tl.store(Z, z) + tl.store(z_ptr, z) dtype_z_np = dtype_z if dtype_z != 'int1' else 'bool_' # triton result @@ -869,9 +882,19 @@ def copy_kernel(input_ptr, output_ptr, n_elements, BLOCK_SIZE: tl.constexpr): # --------------- +def get_reduced_dtype(dtype_str, op): + if op == 'argmin' or op == 'argmax': + return 'int32' + if dtype_str in ['int8', 'uint8', 'int16', 'uint16']: + return 'int32' + if dtype_str == 'bfloat16': + return 'float32' + return dtype_str + + @pytest.mark.parametrize("op, dtype_str, shape", [(op, dtype, shape) - for op in ['min', 'max', 'argmin', 'argmax', 'sum'] + for op in ['min', 'max', 'sum'] for dtype in dtypes_with_bfloat16 for shape in [32, 64, 128, 512]]) def test_reduce1d(op, dtype_str, shape, device='cuda'): @@ -892,7 +915,7 @@ def kernel(X, Z, BLOCK: tl.constexpr): numpy_op = {'sum': np.sum, 'max': np.max, 'min': np.min, 'argmin': np.argmin, 'argmax': np.argmax}[op] # numpy result - z_dtype_str = 'int32' if op == 'argmin' or op == 'argmax' else dtype_str + z_dtype_str = get_reduced_dtype(dtype_str, op) z_tri_dtype_str = z_dtype_str if op not in ['argmin', 'argmax'] and dtype_str == 'bfloat16': z_dtype_str = 'float32' @@ -919,21 +942,35 @@ def kernel(X, Z, BLOCK: tl.constexpr): np.testing.assert_equal(z_ref, z_tri) +# TODO: [Qingyi] Fix argmin / argmax reduce_configs1 = [ (op, dtype, (1, 1024), axis) for dtype in dtypes_with_bfloat16 - for op in ['min', 'max', 'argmin', 'argmax', 'sum'] + for op in ['min', 'max', 'sum'] for axis in [1] ] + + +# shape (128, 256) and (32, 1024) are not enabled on sm86 because the required shared memory +# exceeds the limit of 99KB +reduce2d_shapes = [(2, 32), (4, 32), (4, 128)] +# TODO: fix and uncomment +# , (32, 64), (64, 128)] +if 'V100' in torch.cuda.get_device_name(0): + reduce2d_shapes += [(128, 256) and (32, 1024)] + + reduce_configs2 = [ (op, 'float32', shape, axis) - for op in ['min', 'max', 'argmin', 'argmax', 'sum'] - for shape in [(2, 32), (4, 32), (4, 128), (32, 64), (64, 128), (128, 256), (32, 1024)] + for op in ['min', 'max', 'sum'] + for shape in reduce2d_shapes for axis in [0, 1] ] @pytest.mark.parametrize("op, dtype_str, shape, axis", reduce_configs1 + reduce_configs2) def test_reduce2d(op, dtype_str, shape, axis, device='cuda'): + check_type_supported(dtype_str) # bfloat16 on cc < 80 will not be tested + # triton kernel @triton.jit def kernel(X, Z, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, AXIS: tl.constexpr): @@ -954,7 +991,7 @@ def kernel(X, Z, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, AXIS: tl.constexp x_tri = to_triton(x) numpy_op = {'sum': np.sum, 'max': np.max, 'min': np.min, 'argmin': np.argmin, 'argmax': np.argmax}[op] - z_dtype_str = 'int32' if op == 'argmin' or op == 'argmax' else dtype_str + z_dtype_str = get_reduced_dtype(dtype_str, op) z_tri_dtype_str = z_dtype_str # numpy result if op not in ['argmin', 'argmax'] and dtype_str == 'bfloat16': @@ -992,7 +1029,8 @@ def kernel(X, Z, BLOCK_M: tl.constexpr, BLOCK_N: tl.constexpr, AXIS: tl.constexp @pytest.mark.parametrize("dtype_str, shape, perm", [(dtype, shape, perm) - for dtype in ['bfloat16', 'float16', 'float32'] + # TODO: bfloat16 + for dtype in ['float16', 
'float32'] for shape in [(64, 64), (128, 128)] for perm in [(1, 0)]]) def test_permute(dtype_str, shape, perm, device='cuda'): @@ -1038,25 +1076,37 @@ def kernel(X, stride_xm, stride_xn, # --------------- -@pytest.mark.parametrize("epilogue, allow_tf32, dtype", - [(epilogue, allow_tf32, dtype) +@pytest.mark.parametrize("M, N, K, num_warps, col_a, col_b, epilogue, allow_tf32, dtype", + [(*shape, 4, False, False, epilogue, allow_tf32, dtype) + for shape in [(64, 64, 64)] for epilogue in ['none', 'trans', 'add-matrix', 'add-rows', 'add-cols', 'softmax', 'chain-dot'] for allow_tf32 in [True, False] - for dtype in ['float16'] - if not (allow_tf32 and (dtype in ['float16']))]) -def test_dot(epilogue, allow_tf32, dtype, device='cuda'): - cc = _triton.runtime.cc(_triton.runtime.backend.CUDA, torch.cuda.current_device()) - if cc < 70: + for dtype in ['float16', 'float32'] + if not (allow_tf32 and (dtype in ['float16']))] + + + [(*shape_nw, col_a, col_b, 'none', allow_tf32, dtype) + for shape_nw in [[128, 256, 32, 8], + [128, 16, 32, 4], + [32, 128, 64, 4], + [128, 128, 64, 4], + [64, 128, 128, 4], + [32, 128, 64, 2], + [128, 128, 64, 2], + [64, 128, 128, 4]] + for allow_tf32 in [True] + for col_a in [True, False] + for col_b in [True, False] + for dtype in ['int8', 'float16', 'float32']]) +def test_dot(M, N, K, num_warps, col_a, col_b, epilogue, allow_tf32, dtype, device='cuda'): + capability = torch.cuda.get_device_capability() + if capability[0] < 7: pytest.skip("Only test tl.dot() on devices with sm >= 70") - if cc < 80: + if capability[0] < 8: if dtype == 'int8': pytest.skip("Only test int8 on devices with sm >= 80") elif dtype == 'float32' and allow_tf32: pytest.skip("Only test tf32 on devices with sm >= 80") - - M, N, K = 128, 128, 64 - num_warps = 8 - trans_a, trans_b = False, False + torch.backends.cuda.matmul.allow_tf32 = allow_tf32 # triton kernel @triton.jit @@ -1068,7 +1118,7 @@ def kernel(X, stride_xm, stride_xk, ADD_MATRIX: tl.constexpr, ADD_ROWS: tl.constexpr, ADD_COLS: tl.constexpr, ALLOW_TF32: tl.constexpr, DO_SOFTMAX: tl.constexpr, CHAIN_DOT: tl.constexpr, - TRANS_A: tl.constexpr, TRANS_B: tl.constexpr): + COL_A: tl.constexpr, COL_B: tl.constexpr): off_m = tl.arange(0, BLOCK_M) off_n = tl.arange(0, BLOCK_N) off_l = tl.arange(0, BLOCK_N) @@ -1077,7 +1127,9 @@ def kernel(X, stride_xm, stride_xk, Ys = Y + off_k[:, None] * stride_yk + off_n[None, :] * stride_yn Ws = W + off_n[:, None] * stride_wn + off_l[None, :] * stride_wl Zs = Z + off_m[:, None] * stride_zm + off_n[None, :] * stride_zn - z = tl.dot(tl.load(Xs), tl.load(Ys), trans_a=TRANS_A, trans_b=TRANS_B, allow_tf32=ALLOW_TF32) + x = tl.load(Xs) + y = tl.load(Ys) + z = tl.dot(x, y, allow_tf32=ALLOW_TF32) if ADD_MATRIX: z += tl.load(Zs) if ADD_ROWS: @@ -1093,16 +1145,24 @@ def kernel(X, stride_xm, stride_xk, den = tl.sum(num, 1) z = num / den[:, None] if CHAIN_DOT: - # tl.store(Zs, z) - # tl.debug_barrier() - z = tl.dot(z.to(tl.float16), tl.load(Ws), trans_a=TRANS_A) + w = tl.load(Ws) + z = tl.dot(z.to(w.dtype), w) tl.store(Zs, z) # input rs = RandomState(17) - x = numpy_random((K, M) if trans_a else (M, K), dtype_str=dtype, rs=rs) * .1 - y = numpy_random((N, K) if trans_b else (K, N), dtype_str=dtype, rs=rs) * .1 - w = numpy_random((N, N), dtype_str=dtype, rs=rs) * .1 - if allow_tf32: + if col_a: + x = numpy_random((K, M), dtype_str=dtype, rs=rs).T + else: + x = numpy_random((M, K), dtype_str=dtype, rs=rs) + if col_b: + y = numpy_random((N, K), dtype_str=dtype, rs=rs).T + else: + y = numpy_random((K, N), dtype_str=dtype, rs=rs) + 
w = numpy_random((N, N), dtype_str=dtype, rs=rs) + if 'int' not in dtype: + x *= .1 + y *= .1 + if dtype == 'float32' and allow_tf32: x = (x.view('uint32') & np.uint32(0xffffe000)).view('float32') y = (y.view('uint32') & np.uint32(0xffffe000)).view('float32') w = (w.view('uint32') & np.uint32(0xffffe000)).view('float32') @@ -1110,7 +1170,11 @@ def kernel(X, stride_xm, stride_xk, y_tri = to_triton(y, device=device) w_tri = to_triton(w, device=device) # triton result - z = 1 + numpy_random((M, N), dtype_str=dtype, rs=rs) * .1 + if dtype == 'int8': + z = 1 + numpy_random((M, N), dtype_str='int32', rs=rs) + else: + z = 1 + numpy_random((M, N), dtype_str=dtype, rs=rs) * .1 + z_tri = to_triton(z, device=device) if epilogue == 'trans': z_tri = torch.as_strided(z_tri, (M, N), z_tri.stride()[::-1]) @@ -1118,7 +1182,7 @@ def kernel(X, stride_xm, stride_xk, y_tri, y_tri.stride(0), y_tri.stride(1), w_tri, w_tri.stride(0), w_tri.stride(1), z_tri, z_tri.stride(0), z_tri.stride(1), - TRANS_A=trans_a, TRANS_B=trans_b, + COL_A=col_a, COL_B=col_b, BLOCK_M=M, BLOCK_K=K, BLOCK_N=N, ADD_MATRIX=epilogue == 'add-matrix', ADD_ROWS=epilogue == 'add-rows', @@ -1128,9 +1192,12 @@ def kernel(X, stride_xm, stride_xk, ALLOW_TF32=allow_tf32, num_warps=num_warps) # torch result - x_ref = x.T if trans_a else x - y_ref = y.T if trans_b else y - z_ref = np.matmul(x_ref, y_ref) + if dtype == 'int8': + z_ref = np.matmul(x.astype(np.float32), + y.astype(np.float32())).astype(np.int32) + else: + z_ref = np.matmul(x, y) + if epilogue == 'add-matrix': z_ref += z if epilogue == 'add-rows': @@ -1142,35 +1209,39 @@ def kernel(X, stride_xm, stride_xk, denom = np.sum(num, axis=-1, keepdims=True) z_ref = num / denom if epilogue == 'chain-dot': - z_ref = np.matmul(z_ref.T if trans_a else z_ref, w) + z_ref = np.matmul(z_ref, w) # compare # print(z_ref[:,0], z_tri[:,0]) - np.testing.assert_allclose(z_ref, to_numpy(z_tri), rtol=0.01) + if dtype == 'float32': + # XXX: Somehow there's a larger difference when we use float32 + np.testing.assert_allclose(z_ref, to_numpy(z_tri), rtol=0.01, atol=1e-3) + else: + np.testing.assert_allclose(z_ref, to_numpy(z_tri), rtol=0.01) # make sure ld/st are vectorized ptx = pgm.asm['ptx'] assert 'ld.global.v4' in ptx assert 'st.global.v4' in ptx - if allow_tf32: + if dtype == 'float32' and allow_tf32: assert 'mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32' in ptx - elif dtype == 'float32': + elif dtype == 'float32' and allow_tf32: assert 'mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32' not in ptx elif dtype == 'int8': assert 'mma.sync.aligned.m16n8k32.row.col.satfinite.s32.s8.s8.s32' in ptx - -def test_dot_without_load(): - @triton.jit - def kernel(out): - pid = tl.program_id(axis=0) - a = tl.zeros((32, 32), tl.float32) - b = tl.zeros((32, 32), tl.float32) - c = tl.zeros((32, 32), tl.float32) - c = tl.dot(a, b) - pout = out + tl.arange(0, 32)[:, None] * 32 + tl.arange(0, 32)[None, :] - tl.store(pout, c) - - out = torch.ones((32, 32), dtype=torch.float32, device="cuda") - kernel[(1,)](out) +# FIXME: Unsupported layout found in ConvertSplatLikeOp +# def test_dot_without_load(): +# @triton.jit +# def kernel(out): +# pid = tl.program_id(axis=0) +# a = tl.zeros((32, 32), tl.float32) +# b = tl.zeros((32, 32), tl.float32) +# c = tl.zeros((32, 32), tl.float32) +# c = tl.dot(a, b) +# pout = out + tl.arange(0, 32)[:, None] * 32 + tl.arange(0, 32)[None, :] +# tl.store(pout, c) +# +# out = torch.ones((32, 32), dtype=torch.float32, device="cuda") +# kernel[(1,)](out) # --------------- # test arange @@ -1216,7 
+1287,7 @@ def test_masked_load(dtype_str, size, size_diff, device='cuda'): def _kernel(in_ptr, out_ptr, in_size: tl.constexpr, out_size: tl.constexpr): in_offsets = tl.arange(0, out_size) # Load inputs. - x = tl.load(in_ptr + in_offsets, mask=in_offsets < in_size, other=1.0) + x = tl.load(in_ptr + in_offsets, mask=in_offsets < in_size, other=1) # Store output output_offsets = tl.arange(0, out_size) tl.store(out_ptr + output_offsets, x) @@ -1227,16 +1298,12 @@ def _kernel(in_ptr, out_ptr, in_size: tl.constexpr, out_size: tl.constexpr): reference_out = torch.cat((reference_out, torch.ones((size_diff,), dtype=dtype, device=device))) triton.testing.allclose(output, reference_out) - # 'bfloat16': torch.bfloat16, # Testing masked loads with an intermate copy to shared memory run. + @pytest.mark.parametrize("dtype", [torch.bfloat16, torch.float16, torch.float32]) def test_masked_load_shared_memory(dtype, device='cuda'): - cc = _triton.runtime.cc(_triton.runtime.backend.CUDA, torch.cuda.current_device()) - if cc < 70: - pytest.skip("Only test tl.dot() on devices with sm >= 70") - check_type_supported(dtype) # bfloat16 on cc < 80 will not be tested M = 32 @@ -1325,6 +1392,7 @@ def _kernel(dst, src, N, BLOCK_SIZE: tl.constexpr): else: assert "ld.global.b32" in ptx # triton.testing.assert_almost_equal(dst, src[:N]) + # --------------- # test store # --------------- @@ -1402,6 +1470,10 @@ def kernel(VALUE, X): JITFunction.cache_hook = None assert spec_type == value_type +# -------------------- +# value specialization +# -------------------- + @pytest.mark.parametrize( "value, overflow", @@ -1552,9 +1624,23 @@ def _kernel(dst): # ------------- +def system_libdevice_path() -> str: + _SYSTEM_LIBDEVICE_SEARCH_PATHS = [ + '/usr/lib/cuda/nvvm/libdevice/libdevice.10.bc', + '/usr/local/cuda/nvvm/libdevice/libdevice.10.bc', + ] + SYSTEM_LIBDEVICE_PATH: Optional[str] = None + for _p in _SYSTEM_LIBDEVICE_SEARCH_PATHS: + if os.path.exists(_p): + SYSTEM_LIBDEVICE_PATH = _p + assert SYSTEM_LIBDEVICE_PATH is not None, \ + "Could not find libdevice.10.bc path" + return SYSTEM_LIBDEVICE_PATH + + @pytest.mark.parametrize("dtype_str, expr, lib_path", [('int32', 'libdevice.ffs', ''), - ('float32', 'libdevice.pow', '/usr/local/cuda/nvvm/libdevice/libdevice.10.bc'), + ('float32', 'libdevice.pow', system_libdevice_path()), ('float64', 'libdevice.norm4d', '')]) def test_libdevice_tensor(dtype_str, expr, lib_path): @@ -1621,3 +1707,95 @@ def kernel(X, Y, BLOCK: tl.constexpr): kernel[(1,)](x_tri, y_tri, BLOCK=shape[0], extern_libs={'libdevice': lib_path}) # compare np.testing.assert_allclose(y_ref, to_numpy(y_tri), rtol=0.01) + +# ----------------------- +# test layout conversions +# ----------------------- +# TODO: backend hsould be tested separately + + +class MmaLayout: + def __init__(self, version, warps_per_cta): + self.version = version + self.warps_per_cta = str(warps_per_cta) + + def __str__(self): + return f"#triton_gpu.mma<{{versionMajor={self.version[0]}, versionMinor={self.version[1]}, warpsPerCTA={self.warps_per_cta}}}>" + + +class BlockedLayout: + def __init__(self, size_per_thread, threads_per_warp, warps_per_cta, order): + self.sz_per_thread = str(size_per_thread) + self.threads_per_warp = str(threads_per_warp) + self.warps_per_cta = str(warps_per_cta) + self.order = str(order) + + def __str__(self): + return f"#triton_gpu.blocked<{{sizePerThread={self.sz_per_thread}, threadsPerWarp={self.threads_per_warp}, warpsPerCTA={self.warps_per_cta}, order={self.order}}}>" + + +layouts = [ + # MmaLayout(version=1, 
warps_per_cta=[1, 4]), + MmaLayout(version=(2, 0), warps_per_cta=[1, 4]), + # MmaLayout(version=1, warps_per_cta=[4, 1]), + MmaLayout(version=(2, 0), warps_per_cta=[4, 1]), + BlockedLayout([1, 8], [2, 16], [4, 1], [1, 0]), + BlockedLayout([1, 4], [4, 8], [2, 2], [1, 0]), + BlockedLayout([1, 1], [1, 32], [2, 2], [1, 0]), + BlockedLayout([8, 1], [16, 2], [1, 4], [0, 1]), + BlockedLayout([4, 1], [8, 4], [2, 2], [0, 1]), + BlockedLayout([1, 1], [32, 1], [2, 2], [0, 1]), + BlockedLayout([4, 4], [1, 32], [4, 1], [1, 0]) +] + + +@pytest.mark.parametrize("shape", [(128, 128)]) +@pytest.mark.parametrize("dtype", ['float16']) +@pytest.mark.parametrize("src_layout", layouts) +@pytest.mark.parametrize("dst_layout", layouts) +def test_convert2d(dtype, shape, src_layout, dst_layout, device='cuda'): + if str(src_layout) == str(dst_layout): + pytest.skip() + if 'mma' in str(src_layout) and 'mma' in str(dst_layout): + pytest.skip() + + ir = f""" +#src = {src_layout} +#dst = {dst_layout} +""" + """ +module attributes {"triton_gpu.num-warps" = 4 : i32} { + func public @kernel_0d1d(%arg0: !tt.ptr {tt.divisibility = 16 : i32}, %arg1: !tt.ptr {tt.divisibility = 16 : i32}) { + %cst = arith.constant dense<128> : tensor<128x1xi32, #src> + %0 = tt.make_range {end = 128 : i32, start = 0 : i32} : tensor<128xi32, #triton_gpu.slice<{dim = 1, parent = #src}>> + %1 = tt.make_range {end = 128 : i32, start = 0 : i32} : tensor<128xi32, #triton_gpu.slice<{dim = 0, parent = #src}>> + %2 = tt.splat %arg0 : (!tt.ptr) -> tensor<128x128x!tt.ptr, #src> + %4 = tt.expand_dims %0 {axis = 1 : i32} : (tensor<128xi32, #triton_gpu.slice<{dim = 1, parent = #src}>>) -> tensor<128x1xi32, #src> + %5 = arith.muli %4, %cst : tensor<128x1xi32, #src> + %6 = tt.expand_dims %1 {axis = 0 : i32} : (tensor<128xi32, #triton_gpu.slice<{dim = 0, parent = #src}>>) -> tensor<1x128xi32, #src> + %7 = tt.broadcast %6 : (tensor<1x128xi32, #src>) -> tensor<128x128xi32, #src> + %8 = tt.broadcast %5 : (tensor<128x1xi32, #src>) -> tensor<128x128xi32, #src> + %9 = arith.addi %8, %7 : tensor<128x128xi32, #src> + %10 = tt.addptr %2, %9 : tensor<128x128x!tt.ptr, #src>, tensor<128x128xi32, #src> + %11 = tt.load %10 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x128xf16, #src> + %3 = tt.splat %arg1 : (!tt.ptr) -> tensor<128x128x!tt.ptr, #dst> + %12 = triton_gpu.convert_layout %9 : (tensor<128x128xi32, #src>) -> tensor<128x128xi32, #dst> + %13 = triton_gpu.convert_layout %11 : (tensor<128x128xf16, #src>) -> tensor<128x128xf16, #dst> + %14 = tt.addptr %3, %12 : tensor<128x128x!tt.ptr, #dst>, tensor<128x128xi32, #dst> + tt.store %14, %13 : tensor<128x128xf16, #dst> + return + } +} +""" + + x = to_triton(numpy_random(shape, dtype_str=dtype)) + z = torch.empty_like(x) + + # write the IR to a temporary file using mkstemp + import tempfile + with tempfile.NamedTemporaryFile(mode='w', suffix='.ttgir') as f: + f.write(ir) + f.flush() + kernel = triton.compile(f.name) + kernel[(1, 1, 1)](x.data_ptr(), z.data_ptr()) + + assert torch.equal(z, x) diff --git a/python/test/unit/language/test_dequantize.py b/python/test/unit/language/test_dequantize.py deleted file mode 100644 index 93935a4b0009..000000000000 --- a/python/test/unit/language/test_dequantize.py +++ /dev/null @@ -1,261 +0,0 @@ -# flake8: noqa: F821,F841 - -import random - -import torch - -import triton -import triton.language as tl - - -@triton.jit -def dequantize_kernel_int8(output_ptr, input_ptr, size, BLOCK_SIZE: tl.constexpr): - w_offsets = tl.arange(0, BLOCK_SIZE // 4) - mask = w_offsets < (size 
// 4) - input_ptrs = input_ptr + 1 + w_offsets - input = tl.load(input_ptrs, mask=mask, other=0) - scale_shift = tl.load(input_ptr) - scale = (scale_shift & 65535).to(tl.int16).to(tl.float16, bitcast=True) - shift = (scale_shift >> 16).to(tl.int16).to(tl.float16, bitcast=True) - output = tl.dequantize(input, scale, shift, 8) - offsets = tl.arange(0, BLOCK_SIZE) - output_ptrs = tl.multiple_of(output_ptr + offsets, 4) - tl.store(output_ptrs, output, mask=offsets < size) - - -@triton.jit -def dequantize_kernel_scale_shift_int8( - output_ptr, input_ptr, scale_ptr, shift_ptr, size, BLOCK_SIZE: tl.constexpr -): - w_offsets = tl.arange(0, BLOCK_SIZE // 4) - mask = w_offsets < (size // 4) - input_ptrs = tl.multiple_of(input_ptr + w_offsets, 1) - input = tl.load(input_ptrs, mask=mask, other=0) - scale = tl.load(scale_ptr) - shift = tl.load(shift_ptr) - output = tl.dequantize(input, scale, shift, 8) - offsets = tl.arange(0, BLOCK_SIZE) - output_ptrs = tl.multiple_of(output_ptr + offsets, 4) - tl.store(output_ptrs, output, mask=offsets < size) - - -@triton.jit -def dequantize_kernel_int4(output_ptr, input_ptr, size, BLOCK_SIZE: tl.constexpr): - w_offsets = tl.arange(0, BLOCK_SIZE // 8) - mask = w_offsets < (size // 8) - input_ptrs = input_ptr + 1 + w_offsets - input = tl.load(input_ptrs, mask=mask, other=0) - scale_shift = tl.load(input_ptr) - scale = (scale_shift & 65535).to(tl.int16).to(tl.float16, bitcast=True) - shift = (scale_shift >> 16).to(tl.int16).to(tl.float16, bitcast=True) - output = tl.dequantize(input, scale, shift, 4) - offsets = tl.arange(0, BLOCK_SIZE) - output_ptrs = tl.multiple_of(output_ptr + offsets, 8) - tl.store(output_ptrs, output, mask=offsets < size) - - -@triton.jit -def dequantize_kernel_scale_shift_int4( - output_ptr, input_ptr, scale_ptr, shift_ptr, size, BLOCK_SIZE: tl.constexpr -): - w_offsets = tl.arange(0, BLOCK_SIZE // 8) - mask = w_offsets < (size // 8) - input_ptrs = tl.multiple_of(input_ptr + w_offsets, 1) - input = tl.load(input_ptrs, mask=mask, other=0) - scale = tl.load(scale_ptr) - shift = tl.load(shift_ptr) - output = tl.dequantize(input, scale, shift, 4) - offsets = tl.arange(0, BLOCK_SIZE) - output_ptrs = tl.multiple_of(output_ptr + offsets, 8) - tl.store(output_ptrs, output, mask=offsets < size) - - -@triton.jit -def dequantize_kernel_int2(output_ptr, input_ptr, size, BLOCK_SIZE: tl.constexpr): - w_offsets = tl.arange(0, BLOCK_SIZE // 8) - mask = w_offsets < (size // 8) - input_ptrs = tl.multiple_of(input_ptr + 2 + w_offsets, 1) - input = tl.load(input_ptrs, mask=mask, other=0) - scale = tl.load(input_ptr).to(tl.float16, bitcast=True) - shift = tl.load(input_ptr + 1).to(tl.float16, bitcast=True) - output = tl.dequantize(input, scale, shift, 2) - offsets = tl.arange(0, BLOCK_SIZE) - output_ptrs = tl.multiple_of(output_ptr + offsets, 8) - tl.store(output_ptrs, output, mask=offsets < size) - - -@triton.jit -def dequantize_kernel_scale_shift_int2( - output_ptr, input_ptr, scale_ptr, shift_ptr, size, BLOCK_SIZE: tl.constexpr -): - w_offsets = tl.arange(0, BLOCK_SIZE // 8) - mask = w_offsets < (size // 8) - input_ptrs = tl.multiple_of(input_ptr + w_offsets, 1) - input = tl.load(input_ptrs, mask=mask, other=0) - scale = tl.load(scale_ptr) - shift = tl.load(shift_ptr) - output = tl.dequantize(input, scale, shift, 2) - offsets = tl.arange(0, BLOCK_SIZE) - output_ptrs = tl.multiple_of(output_ptr + offsets, 8) - tl.store(output_ptrs, output, mask=offsets < size) - - -def test_dequantize_int8() -> None: - for i in range(10): - if i < 5: - size = random.randrange(16, 
128, 4) - else: - size = random.randrange(132, 1024, 4) - device = torch.device(torch.cuda.current_device()) - - scale_val = random.uniform(0.1, 4.0) - shift_val = random.uniform(-10.0, 10.0) - scale = torch.tensor(scale_val, dtype=torch.float16, device=device) - shift = torch.tensor(shift_val, dtype=torch.float16, device=device) - scale_shift = torch.tensor( - [scale_val, shift_val], - dtype=torch.float16, - device=device, - ).view(torch.int32) - - input_int8 = torch.randint( - 0, 256, (size,), dtype=torch.uint8, device=device - ) - input_int32 = input_int8.view(torch.int32) - - input = torch.cat((scale_shift, input_int32)) - expected = (input_int8 * scale + shift).to(torch.float16) - - output = torch.empty([size], dtype=torch.float16, device=device) - block_size = max(triton.next_power_of_2(size), 128) - grid = (1,) - dequantize_kernel_int8[grid]( - output, input, size, BLOCK_SIZE=block_size, num_warps=1 - ) - rtol, atol = 1e-02, 1e-02 - assert torch.allclose(output, expected, rtol, atol) - - output = torch.empty([size], dtype=torch.float16, device=device) - dequantize_kernel_scale_shift_int8[grid]( - output, - input_int32, - scale, - shift, - size, - BLOCK_SIZE=block_size, - num_warps=1, - ) - assert torch.allclose(output, expected, rtol, atol) - - -def test_dequantize_int4() -> None: - for i in range(10): - if i < 5: - size = random.randrange(16, 256, 8) - else: - size = random.randrange(264, 1024, 8) - device = torch.device(torch.cuda.current_device()) - - scale_val = random.uniform(0.1, 4.0) - shift_val = random.uniform(-10.0, 10.0) - scale = torch.tensor(scale_val, dtype=torch.float16, device=device) - shift = torch.tensor(shift_val, dtype=torch.float16, device=device) - scale_shift = torch.tensor( - [scale_val, shift_val], - dtype=torch.float16, - device=device, - ).view(torch.int32) - - input_int8 = torch.randint( - 0, 256, (size // 2,), dtype=torch.uint8, device=device - ) - input_int32 = input_int8.view(torch.int32) - - input_int8_h1 = input_int8 >> 4 - input_int8_h0 = input_int8 & 15 - - input_int4_val = torch.stack( - (input_int8_h0, input_int8_h1), dim=1 - ).flatten() - - input = torch.cat((scale_shift, input_int32)) - expected = (input_int4_val * scale + shift).to(torch.float16) - - output = torch.empty([size], dtype=torch.float16, device=device) - block_size = max(triton.next_power_of_2(size), 256) - grid = (1,) - dequantize_kernel_int4[grid]( - output, input, size, BLOCK_SIZE=block_size, num_warps=1 - ) - rtol, atol = 1e-02, 1e-02 - assert torch.allclose(output, expected, rtol, atol) - - output = torch.empty([size], dtype=torch.float16, device=device) - dequantize_kernel_scale_shift_int4[grid]( - output, - input_int32, - scale, - shift, - size, - BLOCK_SIZE=block_size, - num_warps=1, - ) - assert torch.allclose(output, expected, rtol, atol) - - -def test_dequantize_int2() -> None: - for i in range(10): - if i < 5: - size = random.randrange(16, 256, 8) - else: - size = random.randrange(264, 1024, 8) - device = torch.device(torch.cuda.current_device()) - - scale_val = random.uniform(0.1, 4.0) - shift_val = random.uniform(-10.0, 10.0) - scale = torch.tensor(scale_val, dtype=torch.float16, device=device) - shift = torch.tensor(shift_val, dtype=torch.float16, device=device) - scale_shift = torch.tensor( - [scale_val, shift_val], - dtype=torch.float16, - device=device, - ).view(torch.int16) - - input_int8 = torch.randint( - 0, 256, (size // 4,), dtype=torch.uint8, device=device - ) - input_int16 = input_int8.view(torch.int16) - - input_int8_q3 = input_int8 >> 6 - input_int8_q2 = 
(input_int8 >> 4) & 3 - input_int8_q1 = (input_int8 >> 2) & 3 - input_int8_q0 = input_int8 & 3 - - input_int2_val = torch.stack( - (input_int8_q0, input_int8_q1, input_int8_q2, input_int8_q3), dim=1 - ).flatten() - - input = torch.cat((scale_shift, input_int16)) - expected = (input_int2_val * scale + shift).to(torch.float16) - - output = torch.empty([size], dtype=torch.float16, device=device) - block_size = max(triton.next_power_of_2(size), 256) - grid = (1,) - - dequantize_kernel_int2[grid]( - output, input, size, BLOCK_SIZE=block_size, num_warps=1 - ) - rtol, atol = 1e-02, 1e-02 - assert torch.allclose(output, expected, rtol, atol) - - output = torch.empty([size], dtype=torch.float16, device=device) - dequantize_kernel_scale_shift_int2[grid]( - output, - input_int16, - scale, - shift, - size, - BLOCK_SIZE=block_size, - num_warps=1, - ) - assert torch.allclose(output, expected, rtol, atol) diff --git a/python/test/unit/language/test_printf.py b/python/test/unit/language/test_printf.py new file mode 100644 index 000000000000..d4872d0107e1 --- /dev/null +++ b/python/test/unit/language/test_printf.py @@ -0,0 +1,22 @@ +import os +import subprocess +import sys + +dir_path = os.path.dirname(os.path.realpath(__file__)) +printf_path = os.path.join(dir_path, "printf_helper.py") + + +def test_printf(): + proc = subprocess.Popen([sys.executable, printf_path], stdout=subprocess.PIPE, shell=False) + (outs, err) = proc.communicate() + outs = outs.split() + new_lines = set() + for line in outs: + try: + value = int(float(line)) + new_lines.add(value) + except Exception as e: + print(e) + for i in range(128): + assert i in new_lines + assert len(new_lines) == 128 diff --git a/python/test/unit/operators/test_blocksparse.py b/python/test/unit/operators/test_blocksparse.py index ebe36e25409c..33e89be1f1ea 100644 --- a/python/test/unit/operators/test_blocksparse.py +++ b/python/test/unit/operators/test_blocksparse.py @@ -2,13 +2,13 @@ import torch import triton -import triton._C.libtriton.triton as _triton @pytest.mark.parametrize("MODE", ["sdd", "dds", "dsd"]) @pytest.mark.parametrize("TRANS_A", [False, True]) @pytest.mark.parametrize("TRANS_B", [False, True]) @pytest.mark.parametrize("BLOCK", [16, 32, 64]) +# TODO: float32 fails @pytest.mark.parametrize("DTYPE", [torch.float16]) def test_matmul(MODE, TRANS_A, TRANS_B, BLOCK, DTYPE, Z=3, H=2, M=512, N=384, K=256): seed = 0 @@ -32,9 +32,9 @@ def test_matmul(MODE, TRANS_A, TRANS_B, BLOCK, DTYPE, Z=3, H=2, M=512, N=384, K= layout[1, 2, :] = 0 layout[1, :, 1] = 0 # create data - a_ref, a_tri = triton.testing.make_pair(a_shape, alpha=.1) - b_ref, b_tri = triton.testing.make_pair(b_shape, alpha=.1) - dc_ref, dc_tri = triton.testing.make_pair(c_shape) + a_ref, a_tri = triton.testing.make_pair(a_shape, alpha=.1, dtype=DTYPE) + b_ref, b_tri = triton.testing.make_pair(b_shape, alpha=.1, dtype=DTYPE) + dc_ref, dc_tri = triton.testing.make_pair(c_shape, dtype=DTYPE) # compute [torch] dc_ref = do_mask(dc_ref) if is_sdd else dc_ref a_ref = do_mask(a_ref) if is_dsd else a_ref @@ -126,8 +126,8 @@ def test_attention_fwd_bwd( batch_size=2, n_heads=2, ): - cc = _triton.runtime.cc(_triton.runtime.backend.CUDA, torch.cuda.current_device()) - if cc < 70: + capability = torch.cuda.get_device_capability() + if capability[0] < 7: pytest.skip("Only test tl.dot() on devices with sm >= 70") # inputs diff --git a/python/test/unit/operators/test_cross_entropy.py b/python/test/unit/operators/test_cross_entropy.py index e28db48150c8..67744820fcb7 100644 --- 
a/python/test/unit/operators/test_cross_entropy.py +++ b/python/test/unit/operators/test_cross_entropy.py @@ -2,20 +2,19 @@ import torch import triton -import triton._C.libtriton.triton as _triton @pytest.mark.parametrize("M, N, dtype, mode", [ (M, N, dtype, mode) for M in [1024, 821] for N in [512, 857, 1871, 2089, 8573, 31000] - for dtype in ['bfloat16', 'float16', 'float32'] + for dtype in ['float16', 'float32'] for mode in ['forward', 'backward'] ] ) def test_op(M, N, dtype, mode): - cc = _triton.runtime.cc(_triton.runtime.backend.CUDA, torch.cuda.current_device()) - if cc < 80 and dtype == "bfloat16": + capability = torch.cuda.get_device_capability() + if capability[0] < 8 and dtype == "bfloat16": pytest.skip("Only test bfloat16 on devices with sm >= 80") dtype = {'bfloat16': torch.bfloat16, 'float16': torch.float16, 'float32': torch.float32}[dtype] # create inputs diff --git a/python/test/unit/operators/test_matmul.py b/python/test/unit/operators/test_matmul.py index 8d20fbae34d6..8ed5dc9cadea 100644 --- a/python/test/unit/operators/test_matmul.py +++ b/python/test/unit/operators/test_matmul.py @@ -4,7 +4,6 @@ import torch import triton -import triton._C.libtriton.triton as _triton @pytest.mark.parametrize( @@ -67,10 +66,10 @@ ), ) def test_op(BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, NWARP, NSTAGE, M, N, K, AT, BT, DTYPE): - cc = _triton.runtime.cc(_triton.runtime.backend.CUDA, torch.cuda.current_device()) - if cc < 70: + capability = torch.cuda.get_device_capability() + if capability[0] < 7: pytest.skip("Only test tl.dot() on devices with sm >= 70") - if cc < 80 and DTYPE == "bfloat16": + if capability[0] < 8 and DTYPE == "bfloat16": pytest.skip("Only test bfloat16 on devices with sm >= 80") if DTYPE == "bfloat16" and SPLIT_K != 1: pytest.skip("bfloat16 matmuls don't allow split_k for now") diff --git a/python/triton/__init__.py b/python/triton/__init__.py index c620543ee69d..9b43de73d463 100644 --- a/python/triton/__init__.py +++ b/python/triton/__init__.py @@ -1,15 +1,52 @@ """isort:skip_file""" -# flake8: noqa: F401 __version__ = '2.0.0' +# --------------------------------------- +# Note: import order is significant here. + # TODO: torch needs to be imported first # or pybind11 shows `munmap_chunk(): invalid pointer` -import torch +import torch # noqa: F401 + # submodules -from .utils import * -from .runtime import Config, autotune, heuristics, JITFunction, KernelInterface +from . import impl +from .utils import ( + cdiv, + MockTensor, + next_power_of_2, + reinterpret, + TensorWrapper, +) +from .runtime import ( + autotune, + Config, + heuristics, + JITFunction, + KernelInterface, +) from .runtime.jit import jit from .compiler import compile, CompilationError from . import language from . import testing from . 
import ops + +__all__ = [ + "autotune", + "cdiv", + "CompilationError", + "compile", + "Config", + "heuristics", + "impl", + "jit", + "JITFunction", + "KernelInterface", + "language", + "MockTensor", + "next_power_of_2", + "ops", + "reinterpret", + "runtime", + "TensorWrapper", + "testing", +] diff --git a/python/triton/compiler.py b/python/triton/compiler.py index a966c9ce475f..96ebfc3cf986 100644 --- a/python/triton/compiler.py +++ b/python/triton/compiler.py @@ -7,14 +7,17 @@ import io import json import os +import re import shutil import subprocess import sys import sysconfig import tempfile import warnings +from collections import namedtuple +from pathlib import Path from sysconfig import get_paths -from typing import Any, Dict, Set, Tuple, Union +from typing import Any, Callable, Dict, Tuple, Union import setuptools import torch @@ -22,6 +25,7 @@ import triton import triton._C.libtriton.triton as _triton +from . import impl from .tools.disasm import extract @@ -30,12 +34,12 @@ def str_to_ty(name): ty = str_to_ty(name[1:]) return triton.language.pointer_type(ty) tys = { - "i1": triton.language.int1, "fp8": triton.language.float8, "fp16": triton.language.float16, "bf16": triton.language.bfloat16, "fp32": triton.language.float32, "fp64": triton.language.float64, + "i1": triton.language.int1, "i8": triton.language.int8, "i16": triton.language.int16, "i32": triton.language.int32, @@ -64,43 +68,56 @@ def mangle_ty(ty): return 'fp32' if ty.is_fp64(): return 'fp64' - if ty.is_void(): - return 'V' if ty.is_block(): elt = mangle_ty(ty.scalar) shape = '_'.join(map(str, ty.shape)) return f'{elt}S{shape}S' + if ty.is_void(): + return 'V' assert False, "Unsupported type" def mangle_fn(name, arg_tys, constants): # doesn't mangle ret type, which must be a function of arg tys mangled_arg_names = '_'.join([mangle_ty(ty) for ty in arg_tys]) - key = lambda x: x.__name__ if isinstance(x, triton.runtime.JITFunction) else repr(x) - mangled_constants = '_'.join([f'{i}c{key(constants[i])}' for i in sorted(constants)]) + mangled_constants = '_'.join([f'{i}c{repr(constants[i])}' for i in sorted(constants)]) mangled_constants = mangled_constants.replace('.', '_d_') mangled_constants = mangled_constants.replace("'", '_sq_') - mangled_constants = mangled_constants.replace("e-", '_em_') ret = f'{name}__{mangled_arg_names}__{mangled_constants}' return ret -def is_triton_tensor(value): - return isinstance(value, triton.language.tensor) +class enter_sub_region: + def __init__(self, generator: CodeGenerator): + self.generator = generator + + def __enter__(self): + # record lscope & local_defs in the parent scope + self.liveins = self.generator.lscope.copy() + self.prev_defs = self.generator.local_defs.copy() + self.generator.local_defs = {} + self.insert_block = self.generator.builder.get_insertion_block() + return self.liveins, self.insert_block + + def __exit__(self, *args, **kwargs): + self.generator.builder.set_insertion_point_to_end(self.insert_block) + self.generator.lscope = self.liveins + self.generator.local_defs = self.prev_defs -class ValueConstructor: - def __init__(self, module, builder, gscope) -> None: +class CodeGenerator(ast.NodeVisitor): + def __init__(self, context, prototype, gscope, attributes, constants, function_name, module=None, is_kernel=False, function_types=dict()): + self.builder = _triton.ir.builder(context) + self.module = self.builder.create_module() if module is None else module + self.function_ret_types = function_types + self.prototype = prototype self.gscope = gscope self.lscope = 
dict() - self.builder = builder - self.module = module - # [name, bb] => triton.language.tensor - self.lvalues: Dict[Tuple[str, _triton.ir.basic_block], triton.language.tensor] = {} - # bb => {name => phi} - self.incomplete_phis = {} - self.sealed_blocks: Set[_triton.ir.basic_block] = set() - # + self.attributes = attributes + self.constants = constants + self.function_name = function_name + self.is_kernel = is_kernel + self.last_node = None self.builtins = { 'range': range, 'min': triton.language.minimum, @@ -110,6 +127,10 @@ def __init__(self, module, builder, gscope) -> None: 'isinstance': isinstance, 'getattr': getattr, } + # SSA-construction + # name => triton.language.tensor + self.local_defs: Dict[str, triton.language.tensor] = {} + self.global_uses: Dict[str, triton.language.tensor] = {} def get_value(self, name): ''' This function: @@ -121,6 +142,8 @@ def get_value(self, name): ret = None if name in self.lscope: ret = self.lscope[name] + if name not in self.local_defs: + self.global_uses[name] = ret # search node.id in global scope elif name in self.gscope: ret = self.gscope[name] @@ -129,8 +152,6 @@ def get_value(self, name): ret = self.builtins[name] else: raise ValueError(f'{name} is not defined') - if is_triton_tensor(ret): - return self._get_tensor(name, self.builder.get_insert_block()) return ret def set_value(self, name: str, @@ -141,117 +162,17 @@ def set_value(self, name: str, 2. store tensor in self.lvalue ''' self.lscope[name] = value - if isinstance(value, triton.language.tensor): - self._set_value(name, self.builder.get_insert_block(), value) - - # - # SSA-construction - # - def _get_tensor(self, name: str, bb: _triton.ir.basic_block) -> triton.language.tensor: - # local value numbering - if (name, bb) in self.lvalues: - return self.lvalues[(name, bb)] - # global value numbering - saved_insert_point = self.builder.get_insert_point() - result = self._get_tensor_recursive(name, bb) - self.builder.set_insert_point(saved_insert_point) - return result - - def _get_tensor_recursive(self, name: str, bb: _triton.ir.basic_block) -> triton.language.tensor: - preds = bb.get_predecessors() - type = self.lscope[name].type - # some preds haven't been filled, create a phi as a proxy of the value - if bb not in self.sealed_blocks: - result = self._make_phi(type, len(preds), bb) - if bb in self.incomplete_phis: - self.incomplete_phis[bb][name] = result - else: - self.incomplete_phis[bb] = {name: result} - elif len(preds) == 1: - # one predecessor: no phi needed, try get value from pred - result = self._get_tensor(name, preds[0]) - elif len(preds) == 0: - result = self._get_tensor(name, None) - else: # multiple preds - phi = self._make_phi(type, len(preds), bb) - self._set_value(name, bb, phi) - result = self._add_phi_operands(name, phi) - self._set_value(name, bb, result) - return result - - # returns a new phi tensor, which encausulate an ir.phi_node - def _make_phi(self, - type: triton.language.dtype, - num_values: int, - bb: _triton.ir.basic_block) -> triton.language.tensor: - instr = bb.get_first_non_phi() - self.builder.set_insert_point((bb, instr)) - ir_phi = self.builder.create_phi(type.to_ir(self.builder), num_values) - if instr: - self.builder.set_insert_block(bb) - return triton.language.tensor(ir_phi, type) - - # complete a phi node. (TODO: rename this as _complete_phis?) 
- # Note: since we try to remove tryival phi, the return tensor might not be a phi - def _add_phi_operands(self, name: str, - phi: triton.language.tensor) -> triton.language.tensor: - bb = phi.handle.get_parent() - for pred in bb.get_predecessors(): - v = self._get_tensor(name, pred) - phi.handle.add_incoming(v.handle, pred) - phi = self._try_remove_trivial_phi(phi) - return phi - - def _set_value(self, name: str, bb: _triton.ir.basic_block, value: triton.language.tensor) -> None: - self.lvalues[(name, bb)] = value - # TODO: why we need this? - self.module.set_instr_metadata(name, value.handle) - - def _seal_block(self, bb: _triton.ir.basic_block): - # complete all incomplete phis - if bb in self.incomplete_phis: - for name, phi in self.incomplete_phis[bb].items(): - result = self._add_phi_operands(name, phi) - # it's possible that this phi is trivial - if self._get_tensor(name, bb).handle == phi.handle: - self._set_value(name, bb, result) - del self.incomplete_phis[bb] - self.sealed_blocks.add(bb) - - def _try_remove_trivial_phi(self, phi: triton.language.tensor) -> triton.language.tensor: - unique_handles = {op for op in phi.handle.ops() if op != phi.handle} - if len(unique_handles) != 1: # non-trivial phi - return phi - v = unique_handles.pop() - phi.handle.replace_all_uses_with(v) - # phi.handle.erase_from_parent() - # TODO: remove trivial phis recursively - return triton.language.tensor(v, phi.type) - - -class CodeGenerator(ast.NodeVisitor): - - def __init__(self, context, prototype, gscope, attributes, constants, function_name, spec_to_1=None, prototypes=None, module=None, is_kernel=False): - self.spec_to_1 = set() if spec_to_1 is None else spec_to_1 - self.prototypes = dict() if prototypes is None else prototypes - self.builder = _triton.ir.builder(context) - self.module = _triton.ir.module('', self.builder) if module is None else module - self.prototype = prototype - self.attributes = attributes - self.constants = constants - self.last_node = None - self.function_name = function_name - self.is_kernel = is_kernel + self.local_defs[name] = value - self.value_constructor = ValueConstructor(self.module, self.builder, gscope) + def is_triton_tensor(self, value): + return isinstance(value, triton.language.tensor) # # AST visitor # - def visit_compound_statement(self, stmts): for stmt in stmts: - self.last_ret = self.visit(stmt) + self.last_ret_type = self.visit(stmt) if isinstance(stmt, ast.Return): break return stmts and isinstance(stmt, ast.Return) @@ -267,15 +188,22 @@ def visit_List(self, node): # By design, only non-kernel functions can return def visit_Return(self, node): - ret = self.visit(node.value) - if ret is None: - return triton.language.tensor(self.builder.ret_void(), triton.language.void) - ret = triton.language.core._to_tensor(ret, self.builder) - ret = triton.language.tensor(self.builder.ret(ret.handle), ret.type) - return ret + ret_value = self.visit(node.value) + if ret_value is None: + self.builder.ret([]) + return None + if isinstance(ret_value, tuple): + ret_values = [triton.language.core._to_tensor(v, self.builder) for v in ret_value] + ret_types = [v.type for v in ret_values] + self.builder.ret([v.handle for v in ret_values]) + return tuple(ret_types) + else: + ret = triton.language.core._to_tensor(ret_value, self.builder) + self.builder.ret([ret.handle]) + return ret.type def visit_FunctionDef(self, node): - arg_names, arg_annotations, kwarg_names = self.visit(node.args) + arg_names, kwarg_names = self.visit(node.args) # initialize defaults for i, default_value 
in enumerate(node.args.defaults): arg_node = node.args.args[-i - 1] @@ -288,58 +216,55 @@ def visit_FunctionDef(self, node): init_node = ast.AnnAssign(target=st_target, value=default_value, annotation=annotation) self.visit(init_node) # initialize function - self.prototypes[self.function_name] = self.prototype - fn = self.module.get_or_insert_function(self.function_name, self.prototype.to_ir(self.builder)) - fn.set_is_kernel(self.is_kernel) + visibility = "public" if self.is_kernel else "private" + fn = self.builder.get_or_insert_function(self.module, self.function_name, self.prototype.to_ir(self.builder), visibility) + self.module.push_back(fn) + entry = fn.add_entry_block() arg_values = [] idx = 0 - for i, (arg_name, annotation) in enumerate(zip(arg_names, arg_annotations)): + for i, arg_name in enumerate(arg_names): if i in self.constants: cst = self.constants[i] if not isinstance(cst, triton.language.constexpr): cst = triton.language.constexpr(self.constants[i]) arg_values.append(cst) continue - if i in self.attributes: - is_ptr = fn.args[idx].type.is_ptr() - attr = 'aligned' if is_ptr else 'multiple_of' - attr = getattr(_triton.ir.attribute_kind, attr) - attr = _triton.ir.attribute(attr, self.attributes[i][1]) - fn.add_attr(idx + 1, attr) - fn.args[idx].name = arg_name - arg_values.append(triton.language.tensor(fn.args[idx], self.prototype.param_types[idx])) - idx += 1 - - insert_pt = self.builder.get_insert_block() - entry = _triton.ir.basic_block.create(self.builder.context, "entry", fn) - self.builder.set_insert_block(entry) - self.value_constructor._seal_block(entry) + else: + if i in self.attributes: + fn.set_arg_attr(idx, "tt.divisibility", self.attributes[i][1]) + arg_values.append(triton.language.tensor(fn.args(idx), self.prototype.param_types[idx])) + idx += 1 + + insert_pt = self.builder.get_insertion_block() for arg_name, arg_value in zip(arg_names, arg_values): - self.value_constructor.set_value(arg_name, arg_value) + self.set_value(arg_name, arg_value) + self.builder.set_insertion_point_to_start(entry) # visit function body has_ret = self.visit_compound_statement(node.body) - # finalize + # finalize function if not has_ret: - self.builder.ret_void() + self.builder.ret([]) else: - # a bit hacky: we only know the return type at the last moment so we update type info here - self.module.reset_ret_ty(self.function_name, self.last_ret.type.to_ir(self.builder)) - self.prototype.ret_type = self.last_ret.type - self.builder.set_insert_block(insert_pt) + # update return type + if isinstance(self.last_ret_type, tuple): + self.prototype.ret_types = list(self.last_ret_type) + fn.reset_type(self.prototype.to_ir(self.builder)) + else: + self.prototype.ret_types = [self.last_ret_type] + fn.reset_type(self.prototype.to_ir(self.builder)) + if insert_pt: + self.builder.set_insertion_point_to_end(insert_pt) def visit_arguments(self, node): arg_names = [] - arg_annotations = [] for arg in node.args: - curr = self.visit(arg) - arg_names += [curr[0]] - arg_annotations += [curr[1]] + arg_names += [self.visit(arg)] kwarg_names = self.visit(node.kwarg) - return arg_names, arg_annotations, kwarg_names + return arg_names, kwarg_names def visit_arg(self, node): ast.NodeVisitor.generic_visit(self, node) - return node.arg, node.annotation + return node.arg def visit_AnnAssign(self, node): # extract attributes @@ -348,13 +273,13 @@ def visit_AnnAssign(self, node): value = self.visit(node.value) # constexpr if annotation == triton.language.constexpr: - if target in self.value_constructor.lscope: + if 
target in self.lscope: raise ValueError(f'{target} is already defined.' f' constexpr cannot be reassigned.') if not isinstance(value, triton.language.constexpr): value = triton.language.constexpr(value) - self.value_constructor.lscope[target] = value - return self.value_constructor.lscope[target] + self.lscope[target] = value + return self.lscope[target] # default: call visit_Assign return self.visit_Assign(node) @@ -369,23 +294,13 @@ def visit_Assign(self, node): names = [names] if not isinstance(values, tuple): values = [values] - if isinstance(values[0], triton.language.tensor) \ - and isinstance(values[0].type, triton.language.tuple_type): - struct = values[0].handle - tys = values[0].type.element_types - values = [self.builder.extract_value(struct, i) for i in range(len(tys))] - values = [triton.language.tensor(v, ty) for v, ty in zip(values, tys)] - assert len(values) == len(names) for name, value in zip(names, values): - # TODO: can we store constexpr here to support constant folding? # by default, constexpr are assigned into python variable if isinstance(value, triton.language.constexpr): value = value.value - if value is None: - raise ValueError(f'Cannot assign None to non-constexpr `{name}`. Please annotate as `: tl.constexpr`') if not isinstance(value, triton.language.tensor): value = triton.language.core._to_tensor(value, self.builder) - self.value_constructor.set_value(name, value) + self.set_value(name, value) def visit_AugAssign(self, node): name = node.target.id @@ -393,12 +308,12 @@ def visit_AugAssign(self, node): rhs = ast.BinOp(lhs, node.op, node.value) assign = ast.Assign(targets=[node.target], value=rhs) self.visit(assign) - return self.value_constructor.get_value(name) + return self.get_value(name) def visit_Name(self, node): if type(node.ctx) == ast.Store: return node.id - return self.value_constructor.get_value(node.id) + return self.get_value(node.id) def visit_Store(self, node): ast.NodeVisitor.generic_visit(self, node) @@ -408,27 +323,11 @@ def visit_Load(self, node): def visit_Tuple(self, node): args = [self.visit(x) for x in node.elts] - mode = type(args[0]) - # tuple of values -- create a struct - if len(args) > 1 and mode == triton.language.tensor\ - and all([type(arg) == mode for arg in args]): - tuple_ty = triton.language.tuple_type([arg.type for arg in args]) - ret = _triton.ir.undef.get(tuple_ty.to_ir(self.builder)) - for i, arg in enumerate(args): - ret = self.builder.insert_value(ret, arg.handle, i) - ret = triton.language.tensor(ret, tuple_ty) - return ret return tuple(args) def visit_BinOp(self, node): - # visit operand lhs = self.visit(node.left) rhs = self.visit(node.right) - is_lhs_constexpr = isinstance(lhs, triton.language.constexpr) - is_rhs_constexpr = isinstance(rhs, triton.language.constexpr) - lhs = lhs.value if is_lhs_constexpr else lhs - rhs = rhs.value if is_rhs_constexpr else rhs - # get function name fn = { ast.Add: '__add__', ast.Sub: '__sub__', @@ -443,13 +342,9 @@ def visit_BinOp(self, node): ast.BitOr: '__or__', ast.BitXor: '__xor__', }[type(node.op)] - # return a new constexpr if both arg are constexprs - if is_lhs_constexpr and is_rhs_constexpr: - return triton.language.constexpr(getattr(lhs, fn)(rhs)) - # call operator - if is_triton_tensor(lhs): + if self.is_triton_tensor(lhs): return getattr(lhs, fn)(rhs, _builder=self.builder) - elif is_triton_tensor(rhs): + elif self.is_triton_tensor(rhs): fn = fn[:2] + 'r' + fn[2:] return getattr(rhs, fn)(lhs, _builder=self.builder) else: @@ -459,29 +354,77 @@ def visit_If(self, node): cond 
= self.visit(node.test) if isinstance(cond, triton.language.tensor): cond = cond.to(triton.language.int1, _builder=self.builder) - current_bb = self.builder.get_insert_block() - then_bb = _triton.ir.basic_block.create(self.builder.context, "then", current_bb.parent) - else_bb = _triton.ir.basic_block.create(self.builder.context, "else", current_bb.parent) if node.orelse else None - endif_bb = _triton.ir.basic_block.create(self.builder.context, "endif", current_bb.parent) - self.value_constructor._seal_block(then_bb) - if else_bb: - self.value_constructor._seal_block(else_bb) - self.builder.cond_br(cond.handle, then_bb, else_bb) - else: - self.builder.cond_br(cond.handle, then_bb, endif_bb) - self.builder.set_insert_block(then_bb) - is_terminator = self.visit_compound_statement(node.body) - # TODO: last statement is a terminator? - if not is_terminator: - self.builder.br(endif_bb) - if else_bb: - self.builder.set_insert_block(else_bb) - is_terminator = self.visit_compound_statement(node.orelse) - # TODO: last statement is a terminator? - if not is_terminator: - self.builder.br(endif_bb) - self.value_constructor._seal_block(endif_bb) - self.builder.set_insert_block(endif_bb) + with enter_sub_region(self) as sr: + liveins, ip_block = sr + liveins_copy = liveins.copy() + then_block = self.builder.create_block() + self.builder.set_insertion_point_to_start(then_block) + self.visit_compound_statement(node.body) + then_defs = self.local_defs.copy() + + # when need an else block when: + # 1. we have an orelse node + # or + # 2. the then block defines new variable + else_defs = {} + if then_defs or node.orelse: + if node.orelse: + self.lscope = liveins + self.local_defs = {} + else_block = self.builder.create_block() + self.builder.set_insertion_point_to_end(else_block) + self.visit_compound_statement(node.orelse) + else_defs = self.local_defs.copy() + else: + # collect else_defs + for name in then_defs: + if name in liveins: + assert self.is_triton_tensor(then_defs[name]) + assert self.is_triton_tensor(liveins[name]) + else_defs[name] = liveins[name] + # collect yields + names = [] + ret_types = [] + for then_name in then_defs: + for else_name in else_defs: + if then_name == else_name: + if then_defs[then_name].type == else_defs[else_name].type: + names.append(then_name) + ret_types.append(then_defs[then_name].type) + + # defined in else block but not in then block + # to find in parent scope and yield them + for else_name in else_defs: + if else_name in liveins and else_name not in then_defs: + if else_defs[else_name].type == liveins[else_name].type: + names.append(else_name) + ret_types.append(else_defs[else_name].type) + then_defs[else_name] = liveins_copy[else_name] + self.builder.set_insertion_point_to_end(ip_block) + + if then_defs or node.orelse: # with else block + if_op = self.builder.create_if_op([ty.to_ir(self.builder) for ty in ret_types], cond.handle, True) + then_block.merge_block_before(if_op.get_then_block()) + self.builder.set_insertion_point_to_end(if_op.get_then_block()) + if len(names) > 0: + self.builder.create_yield_op([then_defs[n].handle for n in names]) + if not node.orelse: + else_block = if_op.get_else_block() + else: + else_block.merge_block_before(if_op.get_else_block()) + self.builder.set_insertion_point_to_end(if_op.get_else_block()) + if len(names) > 0: + self.builder.create_yield_op([else_defs[n].handle for n in names]) + else: # no else block + if_op = self.builder.create_if_op([ty.to_ir(self.builder) for ty in ret_types], cond.handle, False) + 
then_block.merge_block_before(if_op.get_then_block()) + + # update values yielded by IfOp + for i, name in enumerate(names): + new_tensor = triton.language.core.tensor(if_op.get_result(i), ret_types[i]) + self.lscope[name] = new_tensor + self.local_defs[name] = new_tensor + else: if isinstance(cond, triton.language.constexpr): cond = cond.value @@ -505,16 +448,14 @@ def visit_Compare(self, node): assert len(node.ops) == 1 lhs = self.visit(node.left) rhs = self.visit(node.comparators[0]) - is_lhs_constexpr = isinstance(lhs, triton.language.constexpr) - is_rhs_constexpr = isinstance(rhs, triton.language.constexpr) - lhs = lhs.value if is_lhs_constexpr else lhs - rhs = rhs.value if is_rhs_constexpr else rhs - # handle `is`` and `is not`` + if isinstance(lhs, triton.language.constexpr): + lhs = lhs.value + if isinstance(rhs, triton.language.constexpr): + rhs = rhs.value if type(node.ops[0]) == ast.Is: return triton.language.constexpr(lhs is rhs) if type(node.ops[0]) == ast.IsNot: return triton.language.constexpr(lhs is not rhs) - # function name fn = { ast.Eq: '__eq__', ast.NotEq: '__ne__', @@ -523,17 +464,13 @@ def visit_Compare(self, node): ast.Gt: '__gt__', ast.GtE: '__ge__', }[type(node.ops[0])] - # return a new constexpr if both arg are constexprs - if is_lhs_constexpr and is_rhs_constexpr: - return triton.language.constexpr(getattr(lhs, fn)(rhs)) - # call operator - if is_triton_tensor(lhs): + if self.is_triton_tensor(lhs): return getattr(lhs, fn)(rhs, _builder=self.builder) - elif is_triton_tensor(rhs): + elif self.is_triton_tensor(rhs): fn = fn[:2] + 'r' + fn[2:] return getattr(rhs, fn)(lhs, _builder=self.builder) else: - assert False + return getattr(lhs, fn)(rhs) def visit_UnaryOp(self, node): op = self.visit(node.operand) @@ -545,38 +482,79 @@ def visit_UnaryOp(self, node): ast.UAdd: '__pos__', ast.Invert: '__invert__', }[type(node.op)] - if isinstance(op, triton.language.constexpr): - return triton.language.constexpr(getattr(op.value, fn)()) - assert is_triton_tensor(op) - return getattr(op, fn)(_builder=self.builder) + if self.is_triton_tensor(op): + return getattr(op, fn)(_builder=self.builder) + return getattr(op, fn)() def visit_While(self, node): - current_bb = self.builder.get_insert_block() - loop_bb = _triton.ir.basic_block.create(self.builder.context, "loop", current_bb.parent) - next_bb = _triton.ir.basic_block.create(self.builder.context, "postloop", current_bb.parent) + with enter_sub_region(self) as sr: + liveins, insert_block = sr - def continue_fn(): + # condition (the before region) + cond_block = self.builder.create_block() + self.builder.set_insertion_point_to_start(cond_block) cond = self.visit(node.test) - return self.builder.cond_br(cond.handle, loop_bb, next_bb) - - continue_fn() - self.builder.set_insert_block(loop_bb) - self.visit_compound_statement(node.body) - continue_fn() - stop_bb = self.builder.get_insert_block() - self.value_constructor._seal_block(stop_bb) - self.value_constructor._seal_block(loop_bb) - self.value_constructor._seal_block(next_bb) - self.builder.set_insert_block(next_bb) + + # loop body (the after region) + loop_block = self.builder.create_block() + self.builder.set_insertion_point_to_start(loop_block) + self.visit_compound_statement(node.body) + loop_defs = self.local_defs + + # collect loop-carried values + names = [] + ret_types = [] + init_args = [] + yields = [] + for name in loop_defs: + if name in liveins: + # We should not def new constexpr + assert self.is_triton_tensor(loop_defs[name]) + assert 
self.is_triton_tensor(liveins[name]) + if loop_defs[name].type == liveins[name].type: + # these are loop-carried values + names.append(name) + ret_types.append(loop_defs[name].type) + init_args.append(liveins[name]) + yields.append(loop_defs[name]) + + self.builder.set_insertion_point_to_end(insert_block) + while_op = self.builder.create_while_op([ty.to_ir(self.builder) for ty in ret_types], + [arg.handle for arg in init_args]) + # merge the condition region + before_block = self.builder.create_block_with_parent(while_op.get_before(), + [ty.to_ir(self.builder) for ty in ret_types]) + cond_block.merge_block_before(before_block) + self.builder.set_insertion_point_to_end(before_block) + # create ConditionOp: e.g., scf.condition(%cond) %arg0, %arg1, ... + self.builder.create_condition_op(cond.handle, [before_block.arg(i) for i in range(len(init_args))]) + # merge the loop body + after_block = self.builder.create_block_with_parent(while_op.get_after(), + [ty.to_ir(self.builder) for ty in ret_types]) + loop_block.merge_block_before(after_block) + self.builder.set_insertion_point_to_end(after_block) + self.builder.create_yield_op([y.handle for y in yields]) + + # update global uses in while_op + for i, name in enumerate(names): + before_block.replace_use_in_block_with(init_args[i].handle, before_block.arg(i)) + after_block.replace_use_in_block_with(init_args[i].handle, after_block.arg(i)) + + # WhileOp defines new values, update the symbol table (lscope, local_defs) + for i, name in enumerate(names): + new_def = triton.language.core.tensor(while_op.get_result(i), ret_types[i]) + self.lscope[name] = new_def + self.local_defs[name] = new_def for stmt in node.orelse: + assert False, "Not implemented" ast.NodeVisitor.generic_visit(self, stmt) def visit_Subscript(self, node): assert node.ctx.__class__.__name__ == "Load" lhs = self.visit(node.value) slices = self.visit(node.slice) - if is_triton_tensor(lhs): + if self.is_triton_tensor(lhs): return lhs.__getitem__(slices, _builder=self.builder) return lhs[slices] @@ -585,76 +563,103 @@ def visit_ExtSlice(self, node): def visit_For(self, node): iterator = self.visit(node.iter.func) - if iterator != self.value_constructor.builtins['range']: + if iterator != self.builtins['range']: raise RuntimeError('Only `range` iterator currently supported') - # static for loops: all iterator arguments are constexpr + # visit iterator arguments + # note: only `range` iterator is supported now iter_args = [self.visit(arg) for arg in node.iter.args] - is_static = all([isinstance(x, triton.language.constexpr) for x in iter_args]) - if is_static: - st_target = ast.Name(id=node.target.id, ctx=ast.Store()) - iter_args = [arg.value for arg in iter_args] - range = iterator(*iter_args) - if len(range) <= 10: - for i in iterator(*iter_args): - self.value_constructor.lscope[node.target.id] = triton.language.constexpr(i) + # collect lower bound (lb), upper bound (ub), and step + lb = iter_args[0] if len(iter_args) > 1 else self.visit(ast.Num(0)) + ub = iter_args[1] if len(iter_args) > 1 else self.visit(node.iter.args[0]) + step = iter_args[2] if len(iter_args) > 2 else self.visit(ast.Num(1)) + # static for loops: all iterator arguments are constexpr + if isinstance(lb, triton.language.constexpr) and \ + isinstance(ub, triton.language.constexpr) and \ + isinstance(step, triton.language.constexpr): + sta_range = iterator(lb.value, ub.value, step.value) + static_unrolling = os.environ.get('TRITON_STATIC_LOOP_UNROLLING', False) + if static_unrolling and len(sta_range) <= 10: + for i in 
sta_range: + self.lscope[node.target.id] = triton.language.constexpr(i) self.visit_compound_statement(node.body) for stmt in node.orelse: ast.NodeVisitor.generic_visit(self, stmt) return - - # create nodes - st_target = ast.Name(id=node.target.id, ctx=ast.Store()) - ld_target = ast.Name(id=node.target.id, ctx=ast.Load()) - arg_0 = node.iter.args[0] if len(node.iter.args) > 1 else ast.Num(0) - arg_1 = node.iter.args[1] if len(node.iter.args) > 1 else node.iter.args[0] - arg_2 = node.iter.args[2] if len(node.iter.args) > 2 else ast.Num(1) - # init node - init_node = ast.Assign(targets=[st_target], value=arg_0) - - # step node - pos_cond_node = ast.Compare(ld_target, [ast.Lt()], [arg_1]) - neg_cond_node = ast.Compare(ld_target, [ast.Gt()], [arg_1]) - pos_step_node = ast.Compare(arg_2, [ast.Gt()], [ast.Num(0)]) - build_cond = lambda: triton.language.where(self.visit(pos_step_node), - self.visit(pos_cond_node), - self.visit(neg_cond_node), - _builder=self.builder) - # cond_node = neg_cond_node - step_node = ast.AugAssign(target=st_target, op=ast.Add(), value=arg_2) - # code generation - current_bb = self.builder.get_insert_block() - loop_bb = _triton.ir.basic_block.create(self.builder.context, "loop", current_bb.parent) - next_bb = _triton.ir.basic_block.create(self.builder.context, "postloop", current_bb.parent) - - def continue_fn(): - self.visit(step_node) - cond = build_cond() - return self.builder.cond_br(cond.handle, loop_bb, next_bb) - - # init loop induction variable - self.visit(init_node) - # promote it to right type - init_val = self.value_constructor.get_value(node.target.id) - promote = lambda a, b: triton.language.semantic.computation_type_impl(a, b, False) - start_ty = triton.language.core._to_tensor(iter_args[0], self.builder).type - stop_ty = triton.language.core._to_tensor(iter_args[1], self.builder).type if len(iter_args) > 1 else None - ty = promote(start_ty, stop_ty) if len(iter_args) > 1 else start_ty - casted = triton.language.semantic.cast(init_val, ty, self.builder) - self.value_constructor.set_value(node.target.id, casted) - # create cond - cond = build_cond() - self.builder.cond_br(cond.handle, loop_bb, next_bb) - self.builder.set_insert_block(loop_bb) - self.visit_compound_statement(node.body) - # TODO: handle case where body breaks control flow - continue_fn() - stop_bb = self.builder.get_insert_block() - self.value_constructor._seal_block(stop_bb) - self.value_constructor._seal_block(loop_bb) - self.value_constructor._seal_block(next_bb) - self.builder.set_insert_block(next_bb) + # handle negative constant step (not supported by scf.for in MLIR) + negative_step = False + if isinstance(step, triton.language.constexpr) and step.value < 0: + step = triton.language.constexpr(-step.value) + negative_step = True + lb, ub = ub, lb + # lb/ub/step might be constexpr, we need to cast them to tensor + lb = triton.language.core._to_tensor(lb, self.builder).handle + ub = triton.language.core._to_tensor(ub, self.builder).handle + step = triton.language.core._to_tensor(step, self.builder).handle + # ForOp can only accept IndexType as lb/ub/step. 
Cast integer to Index + lb = self.builder.create_to_index(lb) + ub = self.builder.create_to_index(ub) + step = self.builder.create_to_index(step) + # Create placeholder for the loop induction variable + iv = self.builder.create_undef(self.builder.get_int32_ty()) + self.set_value(node.target.id, triton.language.core.tensor(iv, triton.language.core.int32)) + + with enter_sub_region(self) as sr: + liveins, insert_block = sr + + # create loop body block + block = self.builder.create_block() + self.builder.set_insertion_point_to_start(block) + + # visit loop body + self.visit_compound_statement(node.body) + + # If a variable (name) is defined in both its parent & itself, then it's + # a loop-carried variable. (They must be of the same type) + init_args = [] + yields = [] + names = [] + for name in self.local_defs: + if name in liveins: + assert self.is_triton_tensor(self.local_defs[name]), f'{name} is not tensor' + assert self.is_triton_tensor(liveins[name]) + if self.local_defs[name].type != liveins[name].type: + local_value = self.local_defs[name] + self.local_defs[name] = local_value.to(liveins[name].dtype, _builder=self.builder) + names.append(name) + init_args.append(triton.language.core._to_tensor(liveins[name], self.builder)) + yields.append(triton.language.core._to_tensor(self.local_defs[name], self.builder)) + + # create ForOp + self.builder.set_insertion_point_to_end(insert_block) + for_op = self.builder.create_for_op(lb, ub, step, [arg.handle for arg in init_args]) + block.merge_block_before(for_op.get_body(0)) + + # update induction variable with actual value, and replace all uses + self.builder.set_insertion_point_to_start(for_op.get_body(0)) + iv = self.builder.create_index_to_si(for_op.get_induction_var()) + if negative_step: + ub_si = self.builder.create_index_to_si(ub) + iv = self.builder.create_sub(ub_si, iv) + self.lscope[node.target.id].handle.replace_all_uses_with(iv) + self.set_value(node.target.id, triton.language.core.tensor(iv, triton.language.core.int32)) + + # create YieldOp + self.builder.set_insertion_point_to_end(for_op.get_body(0)) + if len(yields) > 0: + self.builder.create_yield_op([y.handle for y in yields]) + for_op_region = for_op.get_body(0).get_parent() + assert for_op_region.size() == 1, "We use SCF, so the loop body should only have one block" + # replace global uses with block arguments + for i, name in enumerate(names): + # arg0 is the induction variable + for_op.get_body(0).replace_use_in_block_with(init_args[i].handle, for_op.get_body(0).arg(i + 1)) + + # update lscope & local_defs (ForOp defines new values) + for i, name in enumerate(names): + self.set_value(name, triton.language.core.tensor(for_op.get_result(i), yields[i].type)) for stmt in node.orelse: + assert False, "Don't know what to do with else after for" ast.NodeVisitor.generic_visit(self, stmt) def visit_Slice(self, node): @@ -677,7 +682,6 @@ def visit_Call(self, node): for keyword in node.keywords: kws.update(self.visit(keyword)) args = [self.visit(arg) for arg in node.args] - if isinstance(fn, triton.runtime.JITFunction): from inspect import getcallargs args = getcallargs(fn.fn, *args, **kws) @@ -695,51 +699,59 @@ def visit_Call(self, node): fn_name = mangle_fn(fn.__name__, arg_types, constants) # generate function def if necessary if not self.module.has_function(fn_name): - ret_type = triton.language.void - prototype = triton.language.function_type(ret_type, arg_types) + prototype = triton.language.function_type([], arg_types) gscope = sys.modules[fn.fn.__module__].__dict__ - generator = 
CodeGenerator(self.builder.context, prototype, gscope, attributes, constants, function_name=fn_name, prototypes=self.prototypes, module=self.module) + generator = CodeGenerator(self.builder.context, prototype, gscope, attributes, constants, module=self.module, function_name=fn_name, function_types=self.function_ret_types) generator.visit(fn.parse()) + callee_ret_type = generator.last_ret_type + self.function_ret_types[fn_name] = callee_ret_type + else: + callee_ret_type = self.function_ret_types[fn_name] symbol = self.module.get_function(fn_name) - ret = self.builder.call(symbol, arg_vals) - if not ret.type.is_void(): - ret = triton.language.tensor(ret, self.prototypes[fn_name].ret_type) - return ret - # built-in function - if sys.modules[fn.__module__] is triton.language.core or isinstance(fn, triton.language.extern.ExternalFunction): - ret = fn(*args, _builder=self.builder, **kws) - if fn in self.value_constructor.builtins.values(): + call_op = self.builder.call(symbol, arg_vals) + if call_op.get_num_results() == 0 or callee_ret_type is None: + return None + elif call_op.get_num_results() == 1: + return triton.language.tensor(call_op.get_result(0), callee_ret_type) + else: + # should return a tuple of tl.tensor + results = [] + for i in range(call_op.get_num_results()): + results.append(triton.language.tensor(call_op.get_result(i), callee_ret_type[i])) + return tuple(results) + if (hasattr(fn, '__self__') and self.is_triton_tensor(fn.__self__)) \ + or impl.is_builtin(fn): + return fn(*args, _builder=self.builder, **kws) + if fn in self.builtins.values(): args = [arg.value if isinstance(arg, triton.language.constexpr) else arg for arg in args] - ret = fn(*args, **kws) - if isinstance(ret, (bool, int, float)): - ret = triton.language.core.constexpr(ret) - else: - ret = triton.language.core._to_tensor(ret, self.builder) - # special case: dynamic parallelism - # in this case the core primitive returns a proxy - # if isinstance(ret, triton.language.core.LaunchProxy): - # ret_type = _triton.ir.type.get_void(self.builder.context) - # arg_tys = [x.type for x in ret.args] - # prototype = _triton.ir.type.make_function(ret_type, arg_tys) - # gscope = sys.modules[ret.fn.fn.__module__].__dict__ - # constants = ret.constants - # fn_name = mangle_fn(ret.fn.__name__, arg_tys, ret.constants) - # # TODO: clean-up attributes handling in function - # if not self.module.has_function(fn_name): - # attributes = {i: list(arg.parent.get_attrs(arg))[0].value for i, arg in enumerate(ret.args) \ - # if isinstance(arg, _triton.ir.argument) and arg.parent.has_attr(i + 1) } - # generator = CodeGenerator(self.builder.context, prototype, gscope, attributes, constants, module=self.module, is_kernel=True) - # generator.visit(ret.fn.parse()) - # symbol = self.module.get_function(fn_name) - # # TODO: should ret.args not include any constants ? 
- # ret = self.builder.launch(symbol, ret.args, ret.grid, ret.num_warps) - return ret - # return fn(*args, **kws) + return fn(*args, **kws) def visit_Constant(self, node): return triton.language.constexpr(node.value) + def visit_BoolOp(self, node: ast.BoolOp): + assert len(node.values) == 2 + lhs = self.visit(node.values[0]) + rhs = self.visit(node.values[1]) + if isinstance(lhs, triton.language.constexpr): + lhs = lhs.value + if isinstance(rhs, triton.language.constexpr): + rhs = rhs.value + + fn = { + ast.And: 'logical_and', + ast.Or: 'logical_or', + }[type(node.op)] + + if self.is_triton_tensor(lhs): + return getattr(lhs, fn)(rhs, _builder=self.builder) + elif self.is_triton_tensor(rhs): + fn = fn[:2] + 'r' + fn[2:] + return getattr(rhs, fn)(lhs, _builder=self.builder) + else: + return getattr(lhs, fn)(rhs) + if sys.version_info < (3, 8): def visit_NameConstant(self, node): return triton.language.constexpr(node.value) @@ -752,6 +764,9 @@ def visit_Str(self, node): def visit_Attribute(self, node): lhs = self.visit(node.value) + if isinstance(lhs, triton.language.tensor): + if node.attr == "T": + return triton.language.semantic.trans(lhs, builder=self.builder) return getattr(lhs, node.attr) def visit_Expr(self, node): @@ -794,6 +809,7 @@ def __init__(self, required, limit, name): self.message = f'out of resource: {name}, '\ f'Required: {required}, '\ f'Hardware limit: {limit}' + self.message += '. Reducing block sizes or `num_stages` may help.' self.required = required self.limit = limit self.name = name @@ -816,9 +832,16 @@ def kernel_suffix(signature, specialization): suffix += 'd' return suffix +# ------------------------------------------------------------------------------ +# ------------------------------------------------------------------------------ -def make_triton_ir(fn, signature, specialization, constants): + +def build_triton_ir(fn, signature, specialization, constants): + # canonicalize signature + if isinstance(signature, str): + signature = {k: v.strip() for k, v in enumerate(signature.split(","))} context = _triton.ir.context() + context.load_triton() # create kernel prototype cst_key = lambda i: fn.arg_names.index(i) if isinstance(i, str) else i constants = {cst_key(key): value for key, value in constants.items()} @@ -832,7 +855,7 @@ def make_triton_ir(fn, signature, specialization, constants): all_constants.update(new_constants) arg_types = [str_to_ty(v) for k, v in signature.items() if k not in constants] - prototype = triton.language.function_type(triton.language.void, arg_types) + prototype = triton.language.function_type([], arg_types) generator = CodeGenerator(context, prototype, gscope=gscope, constants=all_constants, function_name=function_name, attributes=new_attrs, is_kernel=True) try: generator.visit(fn.parse()) @@ -847,25 +870,81 @@ def make_triton_ir(fn, signature, specialization, constants): return ret, generator -def make_ptx(mod: Any, device: int) -> Tuple[str, int]: +def optimize_triton_ir(mod): + pm = _triton.ir.pass_manager(mod.context) + pm.enable_debug() + pm.add_inliner_pass() + pm.add_triton_combine_pass() + pm.add_canonicalizer_pass() + pm.add_cse_pass() + pm.add_licm_pass() + pm.run(mod) + return mod + + +def ast_to_ttir(fn, signature, specialization, constants): + mod, _ = build_triton_ir(fn, signature, specialization, constants) + return optimize_triton_ir(mod) + + +def ttir_to_ttgir(mod, num_warps, num_stages, compute_capability): + pm = _triton.ir.pass_manager(mod.context) + pm.add_convert_triton_to_tritongpu_pass(num_warps) + 
pm.enable_debug() + pm.add_coalesce_pass() + # The combine pass converts blocked layout to mma layout + # for dot ops so that pipeline can get shared memory swizzled correctly. + pm.add_triton_gpu_combine_pass(compute_capability) + pm.add_tritongpu_pipeline_pass(num_stages) + # Prefetch must be done after pipeline pass because pipeline pass + # extracts slices from the original tensor. + pm.add_tritongpu_prefetch_pass() + pm.add_canonicalizer_pass() + pm.add_cse_pass() + pm.add_triton_gpu_combine_pass(compute_capability) + pm.add_licm_pass() + pm.add_triton_gpu_combine_pass(compute_capability) + pm.add_cse_pass() + pm.run(mod) + return mod + + +def add_external_libs(mod, libs): + for name, path in libs.items(): + if len(name) == 0 or len(path) == 0: + return + _triton.add_external_libs(mod, list(libs.keys()), list(libs.values())) + + +def ttgir_to_llir(mod, extern_libs, compute_capability): + if extern_libs: + add_external_libs(mod, extern_libs) + return _triton.translate_triton_gpu_to_llvmir(mod, compute_capability) + + +def llir_to_ptx(mod: Any, compute_capability: int, ptx_version: int = None) -> Tuple[str, int]: ''' Translate TritonGPU module to PTX code. :param mod: a TritonGPU dialect module :return: - PTX code - - shared memory alloaction size + - shared memory allocation size ''' - return _triton.translate_triton_gpu_to_ptx(mod, device) + if ptx_version is None: + _, cuda_version = path_to_ptxas() + ptx_version = ptx_get_version(cuda_version) + return _triton.translate_llvmir_to_ptx(mod, compute_capability, ptx_version) -def make_cubin(ptx, device): +def ptx_to_cubin(ptx: str, compute_capability: int): ''' Compile TritonGPU module to cubin. :param ptx: ptx code - :param device: CUDA device + :param compute_capability: compute capability :return: str ''' - return _triton.compile_ptx_to_cubin(ptx, device) + ptxas, _ = path_to_ptxas() + return _triton.compile_ptx_to_cubin(ptx, ptxas, compute_capability) def ptx_get_kernel_name(ptx: str) -> str: @@ -881,25 +960,57 @@ def ptx_get_kernel_name(ptx: str) -> str: return line.split()[-1] -def _compile(fn, signature: str, device: int = -1, constants=dict(), - specialization=_triton.code_gen.instance_descriptor(), - num_warps: int = 4, num_stages: int = 3, extern_libs=None, - output: str = "ttgir", cc=0) -> Tuple[str, int, str]: - valid_outputs = ("ttir", "ttgir", "ptx", "cubin") - assert output in valid_outputs, "output should be one of [%s], but get \"%s\"" % (','.join(valid_outputs), output) - - # triton-ir - module, _ = make_triton_ir(fn, signature, specialization, constants) - if output == "ttir": - return module - - assert output == "cubin" - assert torch.version.hip is None - backend = _triton.runtime.backend.CUDA - if extern_libs is None: - extern_libs = dict() - name, asm, shared_mem = _triton.code_gen.compile_ttir(backend, module, device, num_warps, num_stages, extern_libs, cc) - return asm, shared_mem, name +@functools.lru_cache +def ptx_get_version(cuda_version) -> int: + ''' + Get the highest PTX version supported by the current CUDA driver. 
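+    For example, a cuda_version string of "11.4" maps to PTX ISA version 74 in the table below.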
+ ''' + assert isinstance(cuda_version, str) + major, minor = map(int, cuda_version.split('.')) + version = major * 1000 + minor * 10 + if version >= 11040: + return 74 + if version >= 11030: + return 73 + if version >= 11020: + return 72 + if version >= 11010: + return 71 + if version >= 11000: + return 70 + if version >= 10020: + return 65 + if version >= 10010: + return 64 + if version >= 10000: + return 63 + raise RuntimeError("Triton only support CUDA 10.0 or higher") + + +def path_to_ptxas(): + prefixes = [ + os.environ.get("TRITON_PTXAS_PATH", ""), + "", + "/usr", + os.environ.get('CUDA_PATH', default_cuda_dir()) + ] + for prefix in prefixes: + ptxas = os.path.join(prefix, "bin", "ptxas") + if os.path.exists(ptxas): + result = subprocess.check_output([ptxas, "--version"], stderr=subprocess.STDOUT) + if result is not None: + version = re.search(r".*release (\d+\.\d+).*", result.decode("utf-8"), flags=re.MULTILINE) + if version is not None: + return ptxas, version.group(1) + raise RuntimeError("Cannot find ptxas") + + +instance_descriptor = namedtuple("instance_descriptor", ["divisible_by_16", "equal_to_1"], defaults=[set(), set()]) + + +# ------------------------------------------------------------------------------ +# compiler +# ------------------------------------------------------------------------------ def ty_to_cpp(ty): @@ -916,6 +1027,7 @@ def ty_to_cpp(ty): "fp16": "float", "bf16": "float", "fp32": "float", + "f32": "float", "fp64": "double", }[ty] @@ -934,7 +1046,7 @@ def binary_name_to_header_name(name): return f"{name}.h" -def generate_launcher(identifier, constants, signature): +def generate_launcher(constants, signature): arg_decls = ', '.join(f"{ty_to_cpp(ty)} arg{i}" for i, ty in signature.items()) def _extracted_type(ty): @@ -949,6 +1061,7 @@ def _extracted_type(ty): 'fp16': 'float', 'bf16': 'float', 'fp32': 'float', + 'f32': 'float', 'fp64': 'double', }[ty] @@ -984,8 +1097,8 @@ def format_of(ty): PyErr_SetString(PyExc_RuntimeError, err); }} }} -#define CUDA_CHECK(ans) {{ gpuAssert((ans), __FILE__, __LINE__); }} +#define CUDA_CHECK(ans) {{ gpuAssert((ans), __FILE__, __LINE__); }} void _launch(int gridX, int gridY, int gridZ, int num_warps, int shared_memory, CUstream stream, CUfunction function, {arg_decls}) {{ void *params[] = {{ {', '.join(f"&arg{i}" for i in signature.keys() if i not in constants)} }}; @@ -994,7 +1107,6 @@ def format_of(ty): }} }} - static inline CUdeviceptr getPointer(PyObject *obj, int idx) {{ if (PyLong_Check(obj)) {{ return (CUdeviceptr)PyLong_AsUnsignedLongLong(obj); @@ -1017,7 +1129,6 @@ def format_of(ty): return (CUdeviceptr)0; }} - static PyObject* launch(PyObject* self, PyObject* args) {{ int gridX, gridY, gridZ; uint64_t _stream; @@ -1093,6 +1204,11 @@ def default_cache_dir(): return os.path.join(os.environ["HOME"], ".triton", "cache") +def default_cuda_dir(): + default_dir = "/usr/local/cuda" + return os.getenv("CUDA_HOME", default=default_dir) + + class CacheManager: def __init__(self, key): @@ -1116,6 +1232,9 @@ def has_file(self, filename): def put(self, data, filename, binary=True): if not self.cache_dir: return + binary = isinstance(data, bytes) + if not binary: + data = str(data) assert self.lock_path is not None filepath = self._make_path(filename) with FileLock(self.lock_path): @@ -1126,7 +1245,7 @@ def put(self, data, filename, binary=True): os.rename(filepath + ".tmp", filepath) -# utilities for generating and compiling C wrappers +# Utilities for generating and compiling C wrappers @functools.lru_cache() @@ -1135,12 +1254,6 
@@ def libcuda_dirs(): return [os.path.dirname(loc) for loc in locs] -@functools.lru_cache() -def cuda_home_dirs(): - default_dir = "/usr/local/cuda" - return os.getenv("CUDA_HOME", default=default_dir) - - @contextlib.contextmanager def quiet(): old_stdout, old_stderr = sys.stdout, sys.stderr @@ -1153,7 +1266,8 @@ def quiet(): def _build(name, src, srcdir): cuda_lib_dirs = libcuda_dirs() - cu_include_dir = os.path.join(cuda_home_dirs(), "include") + cuda_path = os.environ.get('CUDA_PATH', default_cuda_dir()) + cu_include_dir = os.path.join(cuda_path, "include") suffix = sysconfig.get_config_var('EXT_SUFFIX') so = os.path.join(srcdir, '{name}{suffix}'.format(name=name, suffix=suffix)) # try to avoid setuptools if possible @@ -1164,9 +1278,11 @@ def _build(name, src, srcdir): gcc = shutil.which("gcc") cc = gcc if gcc is not None else clang py_include_dir = get_paths()["include"] + cc_cmd = [cc, src, "-O3", f"-I{cu_include_dir}", f"-I{py_include_dir}", f"-I{srcdir}", "-shared", "-fPIC", "-lcuda", "-o", so] cc_cmd += [f"-L{dir}" for dir in cuda_lib_dirs] ret = subprocess.check_call(cc_cmd) + if ret == 0: return so # fallback on setuptools @@ -1219,12 +1335,26 @@ def make_fn_cache_key(fn_hash, signature, configs, constants, num_warps, num_sta return key -def compile(fn, signature: str, device: int = -1, constants=dict(), num_warps: int = 4, - num_stages: int = 3, extern_libs=None, configs=None, cc=0, warm_cache_only=False): - # we get the kernel, i.e. the first function generated in the module - assert len(configs) == 1 - # cache manager - name = fn.__name__ +def read_or_execute(cache_manager, force_compile, file_name, metadata, + run_if_found: Callable[[str], bytes] = None, + run_if_not_found: Callable = None): + suffix = file_name.split(".")[1] + if not force_compile and cache_manager.has_file(file_name): + module = run_if_found(cache_manager._make_path(file_name)) + data = module if isinstance(module, bytes) else str(module).encode("utf-8") + md5 = hashlib.md5(data).hexdigest() + has_changed = metadata and md5 != metadata["md5"][suffix] + return module, md5, has_changed, True + module = run_if_not_found() + data = module if isinstance(module, bytes) else str(module).encode("utf-8") + md5 = hashlib.md5(data).hexdigest() + cache_manager.put(data, file_name, True if isinstance(data, bytes) else data) + return module, md5, True, False + +# + + +def make_stub(name, signature, constants): # name of files that are cached so_cache_key = make_so_cache_key(triton.runtime.jit.version_key(), signature, constants) so_cache_manager = CacheManager(so_cache_key) @@ -1232,40 +1362,168 @@ def compile(fn, signature: str, device: int = -1, constants=dict(), num_warps: i # retrieve stub from cache if it exists if not so_cache_manager.has_file(so_name): with tempfile.TemporaryDirectory() as tmpdir: - src = generate_launcher(name, constants, signature) + src = generate_launcher(constants, signature) src_path = os.path.join(tmpdir, "main.c") with open(src_path, "w") as f: f.write(src) - so = _build(fn.__name__, src_path, tmpdir) + so = _build(name, src_path, tmpdir) with open(so, "rb") as f: so_cache_manager.put(f.read(), so_name, binary=True) + return so_cache_manager._make_path(so_name) + + +def convert_type_repr(x): + match = re.search(r'!tt\.ptr<(.*)>', x) + if match is not None: + return '*' + convert_type_repr(match.group(1)) + return x + + +def make_hash(fn, **kwargs): + if isinstance(fn, triton.runtime.JITFunction): + configs = kwargs["configs"] + signature = kwargs["signature"] + constants = 
kwargs.get("constants", dict()) + num_warps = kwargs.get("num_warps", 4) + num_stages = kwargs.get("num_stages", 3) + # Get unique key for the compiled code + get_conf_key = lambda conf: (sorted(conf.divisible_by_16), sorted(conf.equal_to_1)) + configs_key = [get_conf_key(conf) for conf in configs] + key = f"{fn.cache_key}-{''.join(signature.values())}-{configs_key}-{constants}-{num_warps}-{num_stages}" + return hashlib.md5(key.encode("utf-8")).hexdigest() + assert isinstance(fn, str) + return hashlib.md5((Path(fn).read_text() + triton.runtime.jit.version_key()).encode("utf-8")).hexdigest() + + +# - ^\s*func\s+ : match the start of the string, any leading whitespace, the keyword func, +# and any following whitespace +# - (public\s+)? : optionally match the keyword public and any following whitespace +# - (@\w+) : match an @ symbol followed by one or more word characters +# (letters, digits, or underscores), and capture it as group 1 (the function name) +# - (\((?:%\w+: \S+(?: \{\S+ = \S+ : \S+\})?(?:, )?)*\)) : match a pair of parentheses enclosing +# zero or more arguments separated by commas, and capture it as group 2 (the argument list) +mlir_prototype_pattern = r'^\s*func\s+(?:public\s+)?(@\w+)(\((?:%\w+: \S+(?: \{\S+ = \S+ : \S+\})?(?:, )?)*\))\s*\{\s*$' +ptx_prototype_pattern = r"\.(?:visible|extern)\s+\.(?:entry|func)\s+(\w+)\s*\(([^)]*)\)" +prototype_pattern = { + "ttir": mlir_prototype_pattern, + "ttgir": mlir_prototype_pattern, + "ptx": ptx_prototype_pattern, +} + +mlir_arg_type_pattern = r'%\w+: ([^,^\)\s]+)(?: \{\S+ = \S+ : \S+\})?,?' +ptx_arg_type_pattern = r"\.param\s+\.(\w+)" +arg_type_pattern = { + "ttir": mlir_arg_type_pattern, + "ttgir": mlir_arg_type_pattern, + "ptx": ptx_arg_type_pattern, +} + + +# def compile(fn, signature: str, device: int = -1, constants=dict(), num_warps: int = 4, num_stages: int = 3, extern_libs=None, configs=None): +def compile(fn, **kwargs): + capability = kwargs.get("cc", None) + if capability is None: + device = torch.cuda.current_device() + capability = torch.cuda.get_device_capability(device) + capability = capability[0] * 10 + capability[1] + # we get the kernel, i.e. 
the first function generated in the module + # if fn is not a JITFunction, then it + # has to be a path to a file + context = _triton.ir.context() + asm = dict() + constants = kwargs.get("constants", dict()) + num_warps = kwargs.get("num_warps", 4) + num_stages = kwargs.get("num_stages", 3 if capability >= 75 else 2) + extern_libs = kwargs.get("extern_libs", dict()) + # build compilation stages + stages = { + "ast": (lambda path: fn, None), + "ttir": (lambda path: _triton.ir.parse_mlir_module(path, context), + lambda src: ast_to_ttir(src, signature, configs[0], constants)), + "ttgir": (lambda path: _triton.ir.parse_mlir_module(path, context), + lambda src: ttir_to_ttgir(src, num_warps, num_stages, capability)), + "llir": (lambda path: Path(path).read_bytes(), + lambda src: ttgir_to_llir(src, extern_libs, capability)), + "ptx": (lambda path: Path(path).read_text(), + lambda src: llir_to_ptx(src, capability)), + "cubin": (lambda path: Path(path).read_bytes(), + lambda src: ptx_to_cubin(src, capability)) + } + # find out the signature of the function + if isinstance(fn, triton.runtime.JITFunction): + configs = kwargs.get("configs", None) + signature = kwargs["signature"] + if configs is None: + configs = [instance_descriptor()] + assert len(configs) == 1 + kwargs["configs"] = configs + name = fn.__name__ + first_stage = 0 + if isinstance(signature, str): + signature = {k: v.strip() for k, v in enumerate(signature.split(","))} + kwargs["signature"] = signature + else: + assert isinstance(fn, str) + _, ir = os.path.basename(fn).split(".") + src = Path(fn).read_text() + import re + match = re.search(prototype_pattern[ir], src, re.MULTILINE) + name, signature = match.group(1), match.group(2) + print(name, signature) + types = re.findall(arg_type_pattern[ir], signature) + print(types) + param_tys = [convert_type_repr(ty) for ty in types] + signature = {k: v for k, v in enumerate(param_tys)} + first_stage = list(stages.keys()).index(ir) - # retrieve cached shared object if it exists - fn_cache_key = make_fn_cache_key(fn.cache_key, signature, configs, constants, num_warps, num_stages) - fn_cache_manager = CacheManager(fn_cache_key) - ptx_name = f"{name}.ptx" - cubin_name = f"{name}.cubin" - data_name = f"{name}.json" - ttir_name = f"{name}.ttir" - llir_name = f"{name}.llir" - if not fn_cache_manager.has_file(cubin_name) or \ - not fn_cache_manager.has_file(data_name) or \ - not fn_cache_manager.has_file(ptx_name) or \ - not fn_cache_manager.has_file(ttir_name) or \ - not fn_cache_manager.has_file(llir_name): - asm, shared, kernel_name = _compile(fn, signature, device, constants, configs[0], num_warps, num_stages, - extern_libs, "cubin", cc) - metadata = {"name": kernel_name, "shared": shared, "num_warps": num_warps, "num_stages": num_stages} - fn_cache_manager.put(asm["cubin"], cubin_name) - fn_cache_manager.put(asm["ptx"], ptx_name, binary=False) - fn_cache_manager.put(asm["ttir"], ttir_name, binary=False) - fn_cache_manager.put(asm["llir"], llir_name, binary=False) - fn_cache_manager.put(json.dumps(metadata), data_name, binary=False) - - if warm_cache_only: - return # load_binary() requires a valid cuda context - - return CompiledKernel(name, so_cache_manager._make_path(so_name), fn_cache_manager.cache_dir, device) + # cache manager + so_path = make_stub(name, signature, constants) + # create cache manager + fn_cache_manager = CacheManager(make_hash(fn, **kwargs)) + # determine name and extension type of provided function + if isinstance(fn, triton.runtime.JITFunction): + name, ext = fn.__name__, 
"ast" + else: + name, ext = os.path.basename(fn).split(".") + + # load metadata if any + metadata = None + if fn_cache_manager.has_file(f'{name}.json'): + with open(fn_cache_manager._make_path(f"{name}.json")) as f: + metadata = json.load(f) + else: + metadata = {"num_warps": num_warps, "num_stages": num_stages, "ctime": dict()} + if ext == "ptx": + assert "shared" in kwargs, "ptx compilation must provide shared memory size" + metadata["shared"] = kwargs["shared"] + + first_stage = list(stages.keys()).index(ext) + asm = dict() + module = fn + # run compilation pipeline and populate metadata + for ir, (parse, compile) in list(stages.items())[first_stage:]: + path = fn_cache_manager._make_path(f"{name}.{ir}") + if ir == ext: + next_module = parse(fn) + elif os.path.exists(path) and\ + ir in metadata["ctime"] and\ + os.path.getctime(path) == metadata["ctime"][ir]: + next_module = parse(path) + else: + next_module = compile(module) + fn_cache_manager.put(next_module, f"{name}.{ir}") + if os.path.exists(path): + metadata["ctime"][ir] = os.path.getctime(path) + asm[ir] = next_module if ir == "cubin" else str(next_module) + if ir == "llir" and "shared" not in metadata: + metadata["shared"] = _triton.get_shared_memory_size(module) + if ir == "ptx": + metadata["name"] = ptx_get_kernel_name(next_module) + module = next_module + # write-back metadata + fn_cache_manager.put(json.dumps(metadata), f"{name}.json", binary=False) + # return handle to compiled kernel + return CompiledKernel(so_path, metadata, asm) class CompiledKernel: @@ -1274,7 +1532,7 @@ class CompiledKernel: launch_enter_hook = None launch_exit_hook = None - def __init__(self, fn_name, so_path, cache_dir, device): + def __init__(self, so_path, metadata, asm): # initialize launcher import importlib.util spec = importlib.util.spec_from_file_location("launcher", so_path) @@ -1282,30 +1540,39 @@ def __init__(self, fn_name, so_path, cache_dir, device): spec.loader.exec_module(mod) self.c_wrapper = getattr(mod, "launch") # initialize metadata - with open(os.path.join(cache_dir, f"{fn_name}.json")) as f: - metadata = json.load(f) self.shared = metadata["shared"] self.num_warps = metadata["num_warps"] self.num_stages = metadata["num_stages"] # initialize asm dict - self.asm = dict() - with open(os.path.join(cache_dir, f"{fn_name}.cubin"), "rb") as f: - self.asm["cubin"] = f.read() - with open(os.path.join(cache_dir, f"{fn_name}.ptx"), "r") as f: - self.asm["ptx"] = f.read() - with open(os.path.join(cache_dir, f"{fn_name}.llir"), "r") as f: - self.asm["llir"] = f.read() - with open(os.path.join(cache_dir, f"{fn_name}.ttir"), "r") as f: - self.asm["ttir"] = f.read() - - mod, func, n_regs, n_spills = _triton.code_gen.load_binary(metadata["name"], self.asm["cubin"], self.shared, device) - self.fn_name = fn_name + self.asm = asm + # binaries are lazily initialized + # because it involves doing runtime things + # (e.g., checking amount of shared memory on current device) + self.metadata = metadata + self.cu_module = None + self.cu_function = None + + def _init_handles(self): + if self.cu_module is not None: + return + device = torch.cuda.current_device() + global cuda_utils + init_cuda_utils() + max_shared = cuda_utils.get_device_properties(device)["max_shared_mem"] + if self.shared > max_shared: + raise OutOfResources(self.shared, max_shared, "shared memory") + mod, func, n_regs, n_spills = cuda_utils.load_binary(self.metadata["name"], self.asm["cubin"], self.shared, device) self.cu_module = mod self.cu_function = func - self.n_regs = n_regs - 
self.n_spills = n_spills + + def __getattribute__(self, name): + if name == 'c_wrapper': + self._init_handles() + return super().__getattribute__(name) def __getitem__(self, grid): + self._init_handles() + def runner(*args, stream=None): if stream is None: stream = torch.cuda.current_stream().cuda_stream @@ -1325,3 +1592,153 @@ def get_sass(self, fun=None): os.remove(path) self.asm['sass'] = self.sass return self.sass + + +class CudaUtils(object): + + def __new__(cls): + if not hasattr(cls, 'instance'): + cls.instance = super(CudaUtils, cls).__new__(cls) + return cls.instance + + def _generate_src(self): + return """ + #include + + #include \"cuda.h\" + #define PY_SSIZE_T_CLEAN + #include + + static inline void gpuAssert(CUresult code, const char *file, int line) + { + if (code != CUDA_SUCCESS) + { + const char* prefix = "Triton Error [CUDA]: "; + const char* str; + cuGetErrorString(code, &str); + char err[1024] = {0}; + strcat(err, prefix); + strcat(err, str); + PyErr_SetString(PyExc_RuntimeError, err); + } + } + + #define CUDA_CHECK(ans) { gpuAssert((ans), __FILE__, __LINE__); if(PyErr_Occurred()) return NULL; } + + static PyObject* getDeviceProperties(PyObject* self, PyObject* args){ + int device_id; + if(!PyArg_ParseTuple(args, "i", &device_id)) + return NULL; + // Get device handle + CUdevice device; + cuDeviceGet(&device, device_id); + + // create a struct to hold device properties + int max_shared_mem; + int multiprocessor_count; + int sm_clock_rate; + int mem_clock_rate; + int mem_bus_width; + CUDA_CHECK(cuDeviceGetAttribute(&max_shared_mem, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN, device)); + CUDA_CHECK(cuDeviceGetAttribute(&multiprocessor_count, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT, device)); + CUDA_CHECK(cuDeviceGetAttribute(&sm_clock_rate, CU_DEVICE_ATTRIBUTE_CLOCK_RATE, device)); + CUDA_CHECK(cuDeviceGetAttribute(&mem_clock_rate, CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE, device)); + CUDA_CHECK(cuDeviceGetAttribute(&mem_bus_width, CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH, device)); + + + return Py_BuildValue("{s:i, s:i, s:i, s:i, s:i}", "max_shared_mem", max_shared_mem, + "multiprocessor_count", multiprocessor_count, + "sm_clock_rate", sm_clock_rate, + "mem_clock_rate", mem_clock_rate, + "mem_bus_width", mem_bus_width); + } + + static PyObject* loadBinary(PyObject* self, PyObject* args) { + const char* name; + const char* data; + Py_ssize_t data_size; + int shared; + int device; + if(!PyArg_ParseTuple(args, "ss#ii", &name, &data, &data_size, &shared, &device)) { + return NULL; + } + CUfunction fun; + CUmodule mod; + int32_t n_regs = 0; + int32_t n_spills = 0; + // create driver handles + CUDA_CHECK(cuModuleLoadData(&mod, data)); + CUDA_CHECK(cuModuleGetFunction(&fun, mod, name)); + // get allocated registers and spilled registers from the function + CUDA_CHECK(cuFuncGetAttribute(&n_regs, CU_FUNC_ATTRIBUTE_NUM_REGS, fun)); + CUDA_CHECK(cuFuncGetAttribute(&n_spills, CU_FUNC_ATTRIBUTE_LOCAL_SIZE_BYTES, fun)); + n_spills /= 4; + // set dynamic shared memory if necessary + int shared_optin; + CUDA_CHECK(cuDeviceGetAttribute(&shared_optin, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN, device)); + if (shared > 49152 && shared_optin > 49152) { + CUDA_CHECK(cuFuncSetCacheConfig(fun, CU_FUNC_CACHE_PREFER_SHARED)); + int shared_total, shared_static; + CUDA_CHECK(cuDeviceGetAttribute(&shared_total, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR, device)); + CUDA_CHECK(cuFuncGetAttribute(&shared_static, CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES, fun)); + 
CUDA_CHECK(cuFuncSetAttribute(fun, CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, shared_optin - shared_static)); + } + + if(PyErr_Occurred()) { + return NULL; + } + return Py_BuildValue("(KKii)", (uint64_t)mod, (uint64_t)fun, n_regs, n_spills); + } + + static PyMethodDef ModuleMethods[] = { + {"load_binary", loadBinary, METH_VARARGS, "Load provided cubin into CUDA driver"}, + {"get_device_properties", getDeviceProperties, METH_VARARGS, "Get the properties for a given device"}, + {NULL, NULL, 0, NULL} // sentinel + }; + + static struct PyModuleDef ModuleDef = { + PyModuleDef_HEAD_INIT, + \"cuda_utils\", + NULL, //documentation + -1, //size + ModuleMethods + }; + + PyMODINIT_FUNC PyInit_cuda_utils(void) { + PyObject *m = PyModule_Create(&ModuleDef); + if(m == NULL) { + return NULL; + } + PyModule_AddFunctions(m, ModuleMethods); + return m; + } + """ + + def __init__(self): + src = self._generate_src() + key = hashlib.md5(src.encode("utf-8")).hexdigest() + cache = CacheManager(key) + fname = "cuda_utils.so" + if not cache.has_file(fname): + with tempfile.TemporaryDirectory() as tmpdir: + src_path = os.path.join(tmpdir, "main.c") + with open(src_path, "w") as f: + f.write(src) + so = _build("cuda_utils", src_path, tmpdir) + with open(so, "rb") as f: + cache.put(f.read(), fname, binary=True) + import importlib.util + spec = importlib.util.spec_from_file_location("cuda_utils", cache._make_path(fname)) + mod = importlib.util.module_from_spec(spec) + spec.loader.exec_module(mod) + self.load_binary = mod.load_binary + self.get_device_properties = mod.get_device_properties + + +def init_cuda_utils(): + global cuda_utils + if cuda_utils is None: + cuda_utils = CudaUtils() + + +cuda_utils = None diff --git a/python/triton/impl/__init__.py b/python/triton/impl/__init__.py new file mode 100644 index 000000000000..c600c7fddc31 --- /dev/null +++ b/python/triton/impl/__init__.py @@ -0,0 +1,18 @@ +"""Triton internal implementation details. + +Client libraries should not import interfaces from the `triton.impl` module; +as the details are subject to change. + +APIs defined in the `triton.impl` module which are public will be re-exported +in other relevant `triton` module namespaces. +""" + +from .base import builtin, extern, is_builtin +from triton._C.libtriton.triton import ir + +__all__ = [ + "builtin", + "extern", + "ir", + "is_builtin", +] diff --git a/python/triton/impl/base.py b/python/triton/impl/base.py new file mode 100644 index 000000000000..24048c56dca8 --- /dev/null +++ b/python/triton/impl/base.py @@ -0,0 +1,36 @@ +from __future__ import annotations + +from functools import wraps +from typing import TypeVar + +T = TypeVar("T") + +TRITON_BUILTIN = "__triton_builtin__" + + +def builtin(fn: T) -> T: + """Mark a function as a builtin.""" + assert callable(fn) + + @wraps(fn) + def wrapper(*args, **kwargs): + if "_builder" not in kwargs or kwargs["_builder"] is None: + raise ValueError( + "Did you forget to add @triton.jit ? 
" + "(`_builder` argument must be provided outside of JIT functions.)" + ) + return fn(*args, **kwargs) + + setattr(wrapper, TRITON_BUILTIN, True) + + return wrapper + + +def is_builtin(fn) -> bool: + """Is this a registered triton builtin function?""" + return getattr(fn, TRITON_BUILTIN, False) + + +def extern(fn: T) -> T: + """A decorator for external functions.""" + return builtin(fn) diff --git a/python/triton/language/__init__.py b/python/triton/language/__init__.py index 6b0058dd550c..4618794a0b32 100644 --- a/python/triton/language/__init__.py +++ b/python/triton/language/__init__.py @@ -1,4 +1,175 @@ -# flake8: noqa: F401 -from . import core, extern, libdevice, random -from .core import * -from .random import * +"""isort:skip_file""" +# Import order is significant here. + +from ..impl import ( + ir, + builtin, +) +from .core import ( + abs, + arange, + argmin, + argmax, + atomic_add, + atomic_and, + atomic_cas, + atomic_max, + atomic_min, + atomic_or, + atomic_xchg, + atomic_xor, + bfloat16, + block_type, + cat, + cdiv, + constexpr, + cos, + debug_barrier, + dot, + dtype, + exp, + fdiv, + float16, + float32, + float64, + float8, + function_type, + int1, + int16, + int32, + int64, + int8, + load, + log, + max, + max_contiguous, + maximum, + min, + minimum, + multiple_of, + num_programs, + pi32_t, + pointer_type, + printf, + program_id, + ravel, + reshape, + sigmoid, + sin, + softmax, + sqrt, + store, + sum, + swizzle2d, + tensor, + trans, + triton, + uint16, + uint32, + uint64, + uint8, + umulhi, + view, + void, + where, + xor_sum, + zeros, + zeros_like, +) +from .random import ( + pair_uniform_to_normal, + philox, + philox_impl, + rand, + rand4x, + randint, + randint4x, + randn, + randn4x, + uint32_to_uniform_float, +) + + +__all__ = [ + "abs", + "arange", + "argmin", + "argmax", + "atomic_add", + "atomic_and", + "atomic_cas", + "atomic_max", + "atomic_min", + "atomic_or", + "atomic_xchg", + "atomic_xor", + "bfloat16", + "block_type", + "builtin", + "cat", + "cdiv", + "constexpr", + "cos", + "debug_barrier", + "dot", + "dtype", + "exp", + "fdiv", + "float16", + "float32", + "float64", + "float8", + "function_type", + "int1", + "int16", + "int32", + "int64", + "int8", + "ir", + "load", + "log", + "max", + "max_contiguous", + "maximum", + "min", + "minimum", + "multiple_of", + "num_programs", + "pair_uniform_to_normal", + "philox", + "philox_impl", + "pi32_t", + "pointer_type", + "printf", + "program_id", + "rand", + "rand4x", + "randint", + "randint4x", + "randn", + "randn4x", + "ravel", + "reshape", + "sigmoid", + "sin", + "softmax", + "sqrt", + "store", + "sum", + "swizzle2d", + "tensor", + "trans", + "triton", + "uint16", + "uint32", + "uint32_to_uniform_float", + "uint64", + "uint8", + "umulhi", + "view", + "void", + "where", + "xor_sum", + "zeros", + "zeros_like", +] diff --git a/python/triton/language/core.py b/python/triton/language/core.py index 34fd6356889f..2abc82b0ceec 100644 --- a/python/triton/language/core.py +++ b/python/triton/language/core.py @@ -1,13 +1,14 @@ from __future__ import annotations from enum import Enum -from functools import wraps -from typing import List +from typing import Callable, List, TypeVar import triton -from . import semantic +from . 
import builtin, semantic from triton._C.libtriton.triton import ir +T = TypeVar('T') + def _to_tensor(x, builder): if isinstance(x, bool): @@ -17,41 +18,28 @@ def _to_tensor(x, builder): if -2**31 <= x < 2**31: return tensor(builder.get_int32(x), int32) elif 2**31 <= x < 2**32: - return tensor(builder.get_uint32(x), uint32) + return tensor(builder.get_int32(x), uint32) elif -2**63 <= x < 2**63: return tensor(builder.get_int64(x), int64) elif 2**63 <= x < 2**64: - return tensor(builder.get_uint64(x), uint64) + return tensor(builder.get_int64(x), uint64) else: raise RuntimeError(f'Nonrepresentable integer {x}.') elif isinstance(x, float): return tensor(builder.get_float32(x), float32) elif isinstance(x, constexpr): - if x.value is None: - return None return _to_tensor(x.value, builder) elif isinstance(x, tensor): return x - elif x is None: - return None assert False, f'cannot convert {x} to tensor' -def builtin(fn): - @wraps(fn) - def wrapper(*args, **kwargs): - if '_builder' not in kwargs or \ - kwargs['_builder'] is None: - raise ValueError("Did you forget to add @triton.jit ? (`_builder` argument must be provided outside of JIT functions.)") - return fn(*args, **kwargs) - - return wrapper - - class dtype: SINT_TYPES = ['int1', 'int8', 'int16', 'int32', 'int64'] UINT_TYPES = ['uint8', 'uint16', 'uint32', 'uint64'] FP_TYPES = ['fp8', 'fp16', 'bf16', 'fp32', 'fp64'] + CUSTOMIZED_FP_TYPES = ['fp8'] + STANDARD_FP_TYPES = ['fp16', 'bf16', 'fp32', 'fp64'] OTHER_TYPES = ['void'] class SIGNEDNESS(Enum): @@ -133,6 +121,12 @@ def is_uint64(self): def is_floating(self): return self.name in dtype.FP_TYPES + def is_customized_floating(self): + return self.name in dtype.CUSTOMIZED_FP_TYPES + + def is_standard_floating(self): + return self.name in dtype.STANDARD_FP_TYPES + def is_int_signed(self): return self.name in dtype.SINT_TYPES @@ -146,7 +140,7 @@ def is_bool(self): return self.is_int1() def is_void(self): - return self.name == 'void' + raise RuntimeError("Not implemented") def is_block(self): return False @@ -216,7 +210,7 @@ def __init__(self, element_ty: dtype, address_space: int = 1): self.name = self.__str__() def to_ir(self, builder: ir.builder) -> ir.pointer_type: - return ir.type.make_ptr(self.element_ty.to_ir(builder), 1) + return builder.get_ptr_ty(self.element_ty.to_ir(builder), 1) def __str__(self): return f'pointer<{self.element_ty}>' @@ -241,22 +235,27 @@ def scalar(self): class block_type(dtype): - def __init__(self, element_ty: dtype, shape: List[int]): + def __init__(self, element_ty: dtype, shape: List): self.element_ty = element_ty - # FIXME: - # block_type's shape is a list of int - # while tensor's shape is a list of constexpr + + # Note that block_type's shape is a list of int + # while tensor's shape is a list of constexpr. + + # shape can be empty ([]) when an input is a 0D tensor. 
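+        # A non-empty shape such as [constexpr(4), constexpr(8)] is normalized
+        # below to [4, 8] (numel 32); an empty shape list is rejected with TypeError.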
+ if not shape: + raise TypeError('0d block_type is forbidden') + if isinstance(shape[0], constexpr): + shape = [s.value for s in shape] + self.shape = shape self.numel = 1 - for i, s in enumerate(self.shape): - if isinstance(s, constexpr): - self.shape[i] = s.value - self.numel *= self.shape[i] + for s in self.shape: + self.numel *= s self.name = self.__str__() def to_ir(self, builder: ir.builder) -> ir.block_type: - return ir.type.make_block(self.element_ty.to_ir(builder), self.shape) + return builder.get_block_ty(self.element_ty.to_ir(builder), self.shape) def __str__(self): return f'<{self.shape}, {self.element_ty}>' @@ -284,28 +283,17 @@ def scalar(self): class function_type(dtype): - def __init__(self, ret_type: dtype, param_types: List[dtype]) -> None: - self.ret_type = ret_type + def __init__(self, ret_types: List[dtype], param_types: List[dtype]) -> None: + self.ret_types = ret_types self.param_types = param_types def __str__(self): - return f'fn ({self.param_types}) -> {self.ret_type}' + return f'fn ({self.param_types}) -> {self.ret_types}' def to_ir(self, builder: ir.builder): ir_param_types = [ty.to_ir(builder) for ty in self.param_types] - return ir.type.make_function(self.ret_type.to_ir(builder), ir_param_types) - - -class tuple_type(dtype): - def __init__(self, element_types: List[dtype]) -> None: - self.element_types = element_types - - def __str__(self): - return f'<{self.element_types}>' - - def to_ir(self, builder: ir.builder): - ir_element_types = [ty.to_ir(builder) for ty in self.element_types] - return ir.struct_type.get(ir_element_types, True) + ret_types = [ret_type.to_ir(builder) for ret_type in self.ret_types] + return builder.get_function_ty(ir_param_types, ret_types) # scalar types @@ -346,83 +334,96 @@ def __init__(self, value): def __repr__(self) -> str: return f"constexpr[{self.value}]" - def __bool__(self): - return bool(self.value) + def __add__(self, other): + return constexpr(self.value + other.value) - def __ge__(self, other): - other = other.value if isinstance(other, constexpr) else other - return self.value >= other + def __radd__(self, other): + return constexpr(other.value + self.value) + + def __sub__(self, other): + return constexpr(self.value - other.value) + + def __rsub__(self, other): + return constexpr(other.value - self.value) + + def __mul__(self, other): + return constexpr(self.value * other.value) + + def __mod__(self, other): + return constexpr(self.value % other.value) + + def __rmul__(self, other): + return constexpr(other.value * self.value) + + def __truediv__(self, other): + return constexpr(self.value / other.value) + + def __rtruediv__(self, other): + return constexpr(other.value / self.value) + + def __floordiv__(self, other): + return constexpr(self.value // other.value) + + def __rfloordiv__(self, other): + return constexpr(other.value // self.value) def __gt__(self, other): - other = other.value if isinstance(other, constexpr) else other - return self.value > other + return constexpr(self.value > other.value) - def __le__(self, other): - other = other.value if isinstance(other, constexpr) else other - return self.value <= other + def __rgt__(self, other): + return constexpr(other.value > self.value) + + def __ge__(self, other): + return constexpr(self.value >= other.value) + + def __rge__(self, other): + return constexpr(other.value >= self.value) def __lt__(self, other): - other = other.value if isinstance(other, constexpr) else other - return self.value < other + return constexpr(self.value < other.value) + + def 
__rlt__(self, other): + return constexpr(other.value < self.value) + + def __le__(self, other): + return constexpr(self.value <= other.value) + + def __rle__(self, other): + return constexpr(other.value <= self.value) def __eq__(self, other): - other = other.value if isinstance(other, constexpr) else other - return self.value == other + return constexpr(self.value == other.value) + + def __ne__(self, other): + return constexpr(self.value != other.value) + + def __bool__(self): + return bool(self.value) + + def __neg__(self): + return constexpr(-self.value) + + def __pos__(self): + return constexpr(+self.value) + + def __invert__(self): + return constexpr(~self.value) def __call__(self, *args, **kwds): return self.value(*args, **kwds) - def to(self, dtype, bitcast=False, _builder=None): - if dtype in [float8, float16, bfloat16]: - raise ValueError("floating point constexpr must be float64") - if dtype.is_int(): - ret_ty = int - elif dtype.is_bool(): - ret_ty = bool - elif dtype.is_floating(): - ret_ty = float - return constexpr(ret_ty(self.value)) - class tensor: - # infer dtype from ir type - @staticmethod - def _to_dtype(ir_type): - # block type - if ir_type.is_block(): - scalar_ty = tensor._to_dtype(ir_type.scalar) - return block_type(scalar_ty, ir_type.get_block_shapes()) - # pointer type - if ir_type.is_ptr(): - element_ty = tensor._to_dtype(ir_type.element) - return pointer_type(element_ty) - # primitive type - if ir_type.is_void(): return void - if ir_type.is_int1(): return int1 - if ir_type.is_int8(): return int8 - if ir_type.is_int16(): return int16 - if ir_type.is_int32(): return int32 - if ir_type.is_int64(): return int64 - if ir_type.is_fp8(): return float8 - if ir_type.is_fp16(): return float16 - if ir_type.is_bf16(): return bfloat16 - if ir_type.is_fp32(): return float32 - if ir_type.is_fp64(): return float64 - raise ValueError(f"Unsupported type {ir_type.repr()}") - def __init__(self, handle, type: dtype): # IR handle self.handle = handle # Block shape self.shape = (1, ) - if self.handle.type.is_block(): - self.shape = self.handle.type.shape + if type.is_block(): + self.shape = type.shape self.numel = 1 for s in self.shape: self.numel *= s - is_pow2 = (self.numel and (not (self.numel & (self.numel - 1)))) - if not is_pow2: - raise ValueError("Triton tensors must have a power-of-two number of elements") self.numel = constexpr(self.numel) self.type = type # Tensor type (can be block_type) # Following the practice in pytorch, dtype is scalar type @@ -580,22 +581,36 @@ def __ne__(self, other, _builder=None): other = _to_tensor(other, _builder) return semantic.not_equal(self, other, _builder) + @builtin + def logical_and(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.logical_and(self, other, _builder) + + @builtin + def logical_or(self, other, _builder=None): + other = _to_tensor(other, _builder) + return semantic.logical_or(self, other, _builder) + @builtin def __getitem__(self, slices, _builder=None): if isinstance(slices, slice): slices = [slices] - src_shape = self.shape - dst_shape = [] - curr = 0 - for sl in slices: + ret = self + n_inserted = 0 + for dim, sl in enumerate(slices): if isinstance(sl, constexpr) and sl.value is None: - dst_shape.append(1) + ret = semantic.expand_dims(ret, dim + n_inserted, _builder) + n_inserted += 1 elif sl == slice(None, None, None): - dst_shape.append(src_shape[curr].value) - curr += 1 - ret = semantic.reshape(self, dst_shape, _builder) + pass + else: + assert False, "unsupported" return ret + @property 
+ def T(self): + assert False, "Transposition must be created by the AST Visitor" + @builtin def to(self, dtype, bitcast=False, _builder=None): if isinstance(bitcast, constexpr): @@ -685,20 +700,6 @@ def zeros(shape, dtype, _builder=None): return semantic.zeros(shape, dtype, _builder) -# ----------------------- -# dequantize -# ----------------------- - - -@builtin -def dequantize(input, scale, shift, nbit, dst_ty=float16, _builder=None): - """ - Tries to dequantize the input to given dtype - """ - nbit = _constexpr_to_value(nbit) - return semantic.dequantize(input, scale, shift, nbit, dst_ty, _builder) - - # ----------------------- # Shape Manipulation # ----------------------- @@ -731,7 +732,12 @@ def broadcast_to(input, shape, _builder=None): @builtin -def cat(input, other, _builder=None): +def trans(input, _builder=None): + return semantic.trans(input, _builder) + + +@builtin +def cat(input, other, can_reorder=False, _builder=None): """ Concatenate the given blocks @@ -739,14 +745,19 @@ def cat(input, other, _builder=None): :type input: :param other: The second input tensor. :type other: + :param reorder: Compiler hint. If true, the compiler is + allowed to reorder elements while concatenating inputs. + Only use if the order does not matter (e.g., result is + only used in reduction ops) """ - return semantic.cat(input, other, _builder) + return semantic.cat(input, other, can_reorder, _builder) @builtin -def reshape(input, shape, _builder=None): +def view(input, shape, _builder=None): """ - Tries to reshape the given tensor to a new shape. + Returns a tensor with the same elements as `input` but a different shape. + The order of the elements may not be preserved. :param input: The input tensor. :type input: @@ -755,20 +766,26 @@ def reshape(input, shape, _builder=None): """ shape = [x.value for x in shape] - return semantic.reshape(input, shape, _builder) + return semantic.view(input, shape, _builder) +@builtin +def reshape(input, shape, _builder=None): + # TODO: should be more than just a view + shape = [x.value for x in shape] + return semantic.view(input, shape, _builder) + # ----------------------- # Linear Algebra # ----------------------- @builtin -def dot(input, other, trans_a=False, trans_b=False, allow_tf32=True, _builder=None): +def dot(input, other, allow_tf32=True, _builder=None): """ Returns the matrix product of two blocks. - The two blocks must be two dimensions and have compatible inner dimensions. + The two blocks must be two-dimensional and have compatible inner dimensions. :param input: The first tensor to be multiplied. :type input: 2D tensor of scalar-type in {:code:`float16`, :code:`bfloat16`, :code:`float32`} @@ -776,7 +793,7 @@ def dot(input, other, trans_a=False, trans_b=False, allow_tf32=True, _builder=No :type other: 2D tensor of scalar-type in {:code:`float16`, :code:`bfloat16`, :code:`float32`} """ allow_tf32 = _constexpr_to_value(allow_tf32) - return semantic.dot(input, other, trans_a, trans_b, allow_tf32, _builder) + return semantic.dot(input, other, allow_tf32, _builder) # ----------------------- @@ -814,7 +831,7 @@ def load(pointer, mask=None, other=None, cache_modifier="", eviction_policy="", @builtin -def store(pointer, value, mask=None, eviction_policy="", _builder=None): +def store(pointer, value, mask=None, _builder=None): """ Stores :code:`value` tensor of elements in memory, element-wise, at the memory locations specified by :code:`pointer`. 
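As a rough usage sketch of the updated `triton.language` API in the hunks above (the kernel name, pointer arguments, and BLOCK value here are hypothetical, not taken from the patch): indexing with `None` now lowers to `expand_dims`, `tl.trans` is an explicit builtin, and `tl.dot` no longer takes `trans_a`/`trans_b`.

import triton
import triton.language as tl

@triton.jit
def dot_sketch(a_ptr, b_ptr, c_ptr, BLOCK: tl.constexpr):
    offs = tl.arange(0, BLOCK)
    # offs[:, None] / offs[None, :] go through tensor.__getitem__, which now
    # inserts the new axis via semantic.expand_dims instead of a reshape.
    idx = offs[:, None] * BLOCK + offs[None, :]
    a = tl.load(a_ptr + idx)
    b = tl.load(b_ptr + idx)
    # trans_a/trans_b were removed from tl.dot; transpose explicitly instead.
    c = tl.dot(a, tl.trans(b), allow_tf32=True)
    tl.store(c_ptr + idx, c)

At launch, `BLOCK` would typically be a power of two of at least 16 so that `tl.dot` can map onto the MMA path, e.g. `dot_sketch[(1,)](a, b, c, BLOCK=32)`.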
@@ -831,46 +848,27 @@ def store(pointer, value, mask=None, eviction_policy="", _builder=None): value = _to_tensor(value, _builder) if mask is not None: mask = _to_tensor(mask, _builder) - return semantic.store(pointer, value, mask, eviction_policy, _builder) + return semantic.store(pointer, value, mask, _builder) # ----------------------- # Atomic Memory Operations # ----------------------- -@builtin -def atomic_cas(pointer, cmp, val, _builder=None): - """ - Performs an atomic compare-and-swap at the memory location specified by :code:`pointer`. - - Return the data stored at :code:`pointer` before the atomic operation. - - :param pointer: The memory locations to compare-and-swap. - :type pointer: Block of dtype=triton.PointerDType - :param cmp: The values expected to be found in the atomic object - :type cmp: Block of dtype=`pointer.dtype.element_ty` - :param val: The values to copy in case the expected value matches the contained value. - :type val: Block of dtype=`pointer.dtype.element_ty` - """ - cmp = _to_tensor(cmp, _builder) - val = _to_tensor(val, _builder) - return semantic.atomic_cas(pointer, cmp, val, _builder) - - -def _add_atomic_docstr(name): +def _add_atomic_docstr(name: str) -> Callable[[T], T]: - def _decorator(func): + def _decorator(func: T) -> T: docstr = """ Performs an atomic {name} at the memory location specified by :code:`pointer`. Return the data stored at :code:`pointer` before the atomic operation. - :param pointer: The memory locations to apply {name}. + :param pointer: The memory locations to compare-and-swap. :type pointer: Block of dtype=triton.PointerDType - :param val: The values to {name} in the atomic object. + :param cmp: The values expected to be found in the atomic object + :type cmp: Block of dtype=`pointer.dtype.element_ty` + :param val: The values to copy in case the expected value matches the contained value. :type val: Block of dtype=`pointer.dtype.element_ty` - :param mask: If mask[idx] is false, do not apply {name}. 
-    :type mask: Block of triton.int1, optional
     """
         func.__doc__ = docstr.format(name=name)
         return func
@@ -878,6 +876,14 @@ def _decorator(func):
     return _decorator
 
 
+@builtin
+@_add_atomic_docstr("compare-and-swap")
+def atomic_cas(pointer, cmp, val, _builder=None):
+    cmp = _to_tensor(cmp, _builder)
+    val = _to_tensor(val, _builder)
+    return semantic.atomic_cas(pointer, cmp, val, _builder)
+
+
 @builtin
 @_add_atomic_docstr("exchange")
 def atomic_xchg(pointer, val, mask=None, _builder=None):
@@ -972,9 +978,9 @@ def fdiv(x, y, ieee_rounding=False, _builder=None):
     return semantic.fdiv(x, y, ieee_rounding, _builder)
 
 
-def _add_math_1arg_docstr(name):
+def _add_math_1arg_docstr(name: str) -> Callable[[T], T]:
 
-    def _decorator(func):
+    def _decorator(func: T) -> T:
         docstr = """
     Computes the element-wise {name} of :code:`x`
 
@@ -1021,9 +1027,9 @@ def sqrt(x, _builder=None):
 # Reductions
 # -----------------------
 
-def _add_reduction_docstr(name):
+def _add_reduction_docstr(name: str) -> Callable[[T], T]:
 
-    def _decorator(func):
+    def _decorator(func: T) -> T:
         docstr = """
     Returns the {name} of all elements in the :code:`input` tensor along the provided :code:`axis`
 
@@ -1077,19 +1083,6 @@ def xor_sum(input, axis, _builder=None):
     axis = _constexpr_to_value(axis)
     return semantic.xor_sum(input, axis, _builder)
 
-# -----------------------
-# Utilities
-# -----------------------
-
-
-@builtin
-def globaltimer(_builder=None):
-    return semantic.globaltimer(_builder)
-
-
-@builtin
-def clock(_builder=None):
-    return semantic.clock(_builder)
 
 # -----------------------
 # Internal for debugging
@@ -1189,7 +1182,7 @@ def sigmoid(x):
 
 @triton.jit
 @_add_math_1arg_docstr("softmax")
-def softmax(x, ieee_rounding: constexpr = False):
+def softmax(x, ieee_rounding=False):
     z = x - triton.language.max(x, 0)
     num = triton.language.exp(z)
     den = triton.language.sum(num, 0)
@@ -1204,13 +1197,13 @@ def ravel(x):
     :param x: the input tensor
     :type x: Block
     """
-    return triton.language.reshape(x, [x.numel])
+    return triton.language.view(x, [x.numel])
 
 
 @triton.jit
 def swizzle2d(i, j, size_i, size_j, size_g):
     """
-    transformes indices of a row-major size_i*size_j matrix into those
+    Transforms indices of a row-major size_i*size_j matrix into those
     of one where indices are row major for each group of size_j rows.
     For example, for size_i = size_j = 4 and size_g = 2, it will transform
     [[0 , 1 , 2 , 3 ],
@@ -1243,26 +1236,22 @@ def swizzle2d(i, j, size_i, size_j, size_g):
 
 
 @triton.jit
 def zeros_like(input):
     return zeros(input.shape, input.dtype)
-# -----------------------
-# Dynamic Parallelism
-# -----------------------
-
-# class LaunchProxy:
-#     def __init__(self, fn, args, constants, grid, num_warps) -> None:
-#         self.args = args
-#         self.grid = grid
-#         self.constants = constants
-#         self.num_warps = num_warps
-#         self.fn = fn
-
-
-# @builtin
-# def launch(fn, args, grid, num_warps=None, _builder=None):
-#     constants = {i: x for i, x in enumerate(args) if isinstance(x, constexpr)}
-#     args = [_to_ir(x, builder=_builder) for x in args if not isinstance(x, constexpr)]
-#     grid = [_to_ir(x, builder=_builder) for x in grid]
-#     if num_warps is None:
-#         num_warps = _to_ir(4, builder=_builder)
-#     return LaunchProxy(fn, args, constants, grid, num_warps)
+@builtin
+def printf(prefix, *args, _builder=None):
+    import string
+    new_prefix = prefix
+    if isinstance(prefix, constexpr):
+        new_prefix = prefix.value
+    assert isinstance(new_prefix, str), f"{new_prefix} is not string"
+    b_ascii = True
+    for ch in new_prefix:
+        if ch not in string.printable:
+            b_ascii = False
+            break
+    assert b_ascii, f"{new_prefix} is not an ascii string"
+    new_args = []
+    for arg in args:
+        new_args.append(_to_tensor(arg, _builder))
+    return semantic.printf(new_prefix, new_args, _builder)
diff --git a/python/triton/language/extern.py b/python/triton/language/extern.py
index 1f3c9371cb47..400ba6645ed8 100644
--- a/python/triton/language/extern.py
+++ b/python/triton/language/extern.py
@@ -6,7 +6,6 @@
 def dispatch(func, lib_name: str, lib_path: str, args: list, arg_type_symbol_dict: dict, ret_shape: tuple, _builder=None):
     '''
         Dispatch a function to a library
-
        :param func: the function to dispatch
        :param lib_name: the name of the library
        :param lib_path: the path of the library
@@ -14,7 +13,6 @@ def dispatch(func, lib_name: str, lib_path: str, args: list, arg_type_symbol_dic
        :param arg_type_symbol_dict: the type of the arguments
        :param ret_shape: the shape of the return value
        :param _builder: the builder
-
        :return: the return value of the function
     '''
     if len(arg_type_symbol_dict) == 0:
@@ -42,20 +40,19 @@ def dispatch(func, lib_name: str, lib_path: str, args: list, arg_type_symbol_dic
     else:
         symbol = arg_type_symbol_dict[arg_types][0]
         ret_type = arg_type_symbol_dict[arg_types][1]
-        ret_type = core.block_type(ret_type, ret_shape) if ret_shape is not None else ret_type
+        if ret_shape:
+            ret_type = core.block_type(ret_type, ret_shape)
     return core.tensor(func(lib_name, lib_path, symbol, arg_list, ret_type.to_ir(_builder)), ret_type)
 
 
 def elementwise(lib_name: str, lib_path: str, args: list, arg_type_symbol_dict: dict, _builder=None):
     '''
        Dispatch an elementwise function to a library
-
        :param lib_name: the name of the library
        :param lib_path: the path of the library
        :param args: the arguments of the function
        :param arg_type_symbol_dict: the type of the arguments
        :param _builder: the builder
-
        :return: the return value of the function
     '''
     dispatch_args = args.copy()
@@ -87,27 +84,5 @@ def elementwise(lib_name: str, lib_path: str, args: list, arg_type_symbol_dict:
             dispatch_args[i], _ = semantic.binary_op_type_checking_impl(
                 dispatch_args[i], broadcast_arg, _builder)
         ret_shape = broadcast_arg.shape
-    func = getattr(_builder, "create_extern_elementwise")
+    func = getattr(_builder, "create_external_elementwise")
     return dispatch(func, lib_name, lib_path, dispatch_args, arg_type_symbol_dict, ret_shape, _builder)
-
-
-class ExternalFunction:
-    '''
-        A wrapper for external functions
-    '''
-
-    def __init__(self, fn):
-        self.fn = fn
-
-    def __call__(self, *args, **kwargs):
-        if '_builder' not in kwargs or \
-           kwargs['_builder'] is None:
-            raise ValueError("Did you forget to add @triton.jit ? (`_builder` argument must be provided outside of JIT functions.)")
-        return self.fn(*args, **kwargs)
-
-
-def extern(fn):
-    '''
-        A decorator for external functions
-    '''
-    return ExternalFunction(fn)
diff --git a/python/triton/language/libdevice.10.bc b/python/triton/language/libdevice.10.bc
old mode 100644
new mode 100755
index ef3ae8d81946e5401289886e62e415d6e10783e8..b2c75a5026df628a34c4095ec05806af50fe0d87
GIT binary patch
delta 171121
[base85-encoded binary delta data for python/triton/language/libdevice.10.bc omitted]
zg4pfLHsc0(*e#6|HpLT(wj0k$ZfR2CTxhV#Ir%M(Zu7hllXFG4G;L5W2_J8AQhQ5- z!A(>WlXLvr8VQ64>gio?9a)ak>&Kyo0>==nRX(e!3>o4P=D}+R1gmui9xoDHpq6t`}qu^wZ7V20WpBU`a)!|^S$KpZIA$?H!#L( zc^({R*m@5oym(Wh#DGZF7f!4B12}y=$4L;;%l(K_1gHDK>5@f!j*Dk~QHthEx6@qL zlQs~VGnoCpN0pZ~BAV-${E{jFmqovp9`2*YKB-?Iv_$1z#jx zL**;^wv-T#-4jcVQsd1vT`4IaCsxSCsW!_di7$@kQTymVi_wzKo3@xXSTwdJ!iN~x zXd@oo=E}z3kR)aq^9j!oeJLZ_cnM*Q3{bq(bO|#Zdd!w|d1!>HNr=y2L~MqLqD2<% z)}aw4;p#GQJlM^N=m&JnASCsz0v~A)5^%W<>SS5Qxvw-#T7jIZE z!$#BOnp(YqX&5l-RU3$$_BY>8Fz94VPqRP9_)n-eP2-^$51%)9ya*S1N|y6I6`Ej+ zje=kvE-?63lf-Mx#hW8Aa&FPgHdkQDWHuK19CH&_;3_^p*hw*kFEXRHi+sOIr4*+s z$?}T1<_hDHX0Ex0Ji>wX9A!@5K@}B8nTw4_iYRl1@kkS8uGyg!kNzpmlC4PdV2>jM zJom2VOM*ry((aJkS}LN=HGQTyuzAfY)?TNzdQ-oActeVX0f?~ z%bxPL@rA`p%;`LKklFMpHe?&nmzpQ=*kU_+l}&B=?s+vuvD?ID+YByH?dnu>%2%N( zQ}mprJ_(*WYj7Mgf4(0qYZdq`IW&(C9qVT_Ed4C>_8ceufr)qHx%?3H^ZkKrQo#gO zak4HT`2_lnzv2h6#rhuI+Gdk)nii{$)ijW*oEgjKtn)YMzpOzlZ2g1uyRJ2-pNu8- z|D_Y_DkYR3%NI~Wcu|8`W|UfIPCrFS{pG~EK}nUw^4%z@wgz#LQE0t6{WK-?w-c+H z5~_&h2T(%7C*n4v&<1n*nb>Z@xYxKg`!ZfP^tMBCeh`QK<87b)<87Nw-?r$9>D%I_ zZ~N8sZ97cgw(W`O%O=6g;?#pX_cVFiq&Sne6*if^?Pk-rb?IsHwwZAzZ!2puecNRH z+j@=D!pkmF#{xPsp)FVvt;S;~(Vtv3d^B;2`T8DI6Dd;66-CtJ4erS*quNy+$LCY+ z%6}@B7=^Z&D~#XWyC>^9B~%l~$0#AmQ*kO1!gcDe*lMmYes}+#tV&8s8^;$?QmIeH zHO5>v@+mujS9AC6#0iY8IfqeMi!}8=TZhisR-7WUK6q+xu!l$r1V3 zFEZGxOz&iGaa0AFt+9i6=2TtP@tZ=yfgKzL4=@hv4#{^#g^W;~6TTd6Hnim`Y-I5@ z9_tPP;EUMc(J!)}Z+Jw7AbuUK%byuy4{{*T>@`zRzSZyuStpL*qDDzeci- z{ukL@wQKmCGh}y7eIXXsG~k9$uh?%+7q27x>$;vSGfJv$4PQV>)w~ePY8ptPZ1XdT zRpgN-+g$8LRiWz1y4gzJIDReP<1~3=tVOJ*gm6v1x!9YMI@ptSi;_xP%O6fjNn6DH zU(573R}%fa`Ev<9ianX$XLNidMz;7dCsr9d_<|Wz^w(;XXZvn7M!G$T>DJ1-+?`F; zQtC{oHRB!^kAGXOqE3ZbEAMg1lm*OP)+ix_F9BOAc~tc_EJ&8e{Xsil9L6{elWF`Z zQHApDU(zXH@u9R3s$OYWmcob+if@^}wL_MJ9fZjRT#&!CiXWH0SPP(}b?Kp7Y3kjf|**P4Gq_vk?PD4S74-XlWw zPP3xZR7q!&N_ufs8~L*YUqzc*ldS-t=_{&W)Xb96u+K_b6x@Xs;|WpsUj`Iys_Bk4 zvDVB4T&`vYCkFO^IT(ekKUvovq`9idG!I<^GAk#At%Dt!B`DlEMh;`-5>fLkA?@HV zy3N>r;870Q+K*V$ZS?@$mPhSt)%+1ly)AO<#-osyj*nR8ZIL0C#xwP^6tbT)K5-#M z$GOZDD&ag%cwra9l;_}!u+E8^lvS@GjYJJ}DDid1EY2@r&Lz=j34Za^M~6jp1UumX*);TQa|%qP zy`yhrYc{aSj^>H?VwD;Xy?U|Aj7Q$RSmnkepI)r1#v|WetTCIZYW#Y!s9fms$0RHl zQ}kFNDc#5>lRdOOR`AnS6{C;6+#coqm|Z&>7mkzm0E>Eh53AW=7lh1~(`To#b5WU9 zs7%>xEvZp4SOc9z79|bhNkN;bno}DThRhHOhs;q) zZn26-SHa+}H^U=)oMs&*2WjayO*Zrk_#bo&b&s`Fw@~5yqtGpAM^U$EyE=#J7A?KD zL_@cTk4D{M{TwXt$4JNif5=~DljM0B@YU=?#_ZS+Z`S!W+g!-)i$JX(!V(hDI z(#C8i_E9~6--3ET>Rcf$c%v@$1jLq+NffZJSj|0L>*zj>pn!|oHHoC#j!*Qbe{`EZ0A{8t>Yv%q+fFLuU^ zM*^|4oAC$}I}43RZer&EM(|D9Bc2?<*aNX@Q#m@Q0 ziz=~mk?|;B>|8RzD<9w>rLA2|lwBG(IV|CkYO87%+l9!dxe#QF)`;mY1mxW#8{Em3 z(MTOJd)g|EM?_CsMw9LuuFHkn%T}pBL`Os~TgFqPpwL!nJR*d)jAzDcp0-Nk5#ede z_|tgJ%T{STBD`!F&yCl-ZIy;22>X=~-nNVv#*03-O5+jXW6NkZUh}n88jlEHTgFS{ zH9uRW@rdxVWwaQt`P(XuM})sEqfAw}ZIr1BcZ|}j zuwH{{qfIrKGul*x`J+uWSTGuCa6WBknDPfZTpy<^r2XU^3kj;QSSGj-E4DtS+?c_W zYyW_)U~B%M!JxB(>2E-hXw03qYyE-HwA!Br4v_j97-LijODR|N+Eg>7fk$~b(^dYY zgL8hM8*DtFPhgyBJwA`o#=~Jiqdh@&uO|G#=fc)+@v)gsT|FUxT)D((raHe ziL=vqbPJ!vNi!bSOyZ;)kM2(5WEzj|PvYz`9zB@E*=szioy6I1JgS?-Q5lcwCvp0| zHN*yc%74mP?=E^Ga*sNg^d5GrSLyJtdighmdZa?wHf`q(t#nq6)TLBRIfXyEvHo)C zJGxXuo#}LAy>{R%C>2wA!9TmP-Z*gd3lWSR(;t8*`WH4}AIs!Fl4%KKPw3^lz(E=Z z({brC?6Nq}&Jo{Y`P{z-VNaiUD%Mxt!FZZpTWH*%|I{>s=2bKz?I`avoYXwxj%{23 zXx_>I&^-3vncEGTr&jrx^|As3xtA5V%yWic1M|QQ4X?t0pXpmbxwVN%x!tVaQp!E~ zKREv>+=KQM-Q#8%c@IAGj%|9Erz&F<_uYAhsB&#Pv5*WfZ)fH!z0j2Grjx(L903}f zyA^3LIJJ|fO0Q~$$G_8Q@Li z1eyFQs>S;KZJ!|180db--E5MUs_){K?Rq-gLLa;7>ISjfZ=wOO|Azp9xxtUn&E$2! 
zqUiX`)Mi*SDR1y4&AUTGQI=LJ%dMygx;DPM8!U#bv!93C=dM>##nk$*Lht3qL@+Q4 zb7)y3M8ZAU;Ziz!f7&YE^!%$iY^l3;5dX51l2pB^t!R3*x5>)(%=dVhYe)9)A zGd=1jxb2G_{DYmD+WQIa_%buoyg=l!0xyyLoVmxlE)e#B+P;KWthY!9!S@4vPaqZAJ!w0i z4T7Nj+=&Wq(C?mx^>G<@)lj~x3-)y$FO^3_ouz3~yMXTl1KHs2^4*aT+}*U+0K2z1 z2iU!Fddggxun$e8iunnC;CZSC{x#Us9zxM8UlfY)FD2|a^Xv0r$7yZmut3RE-_kUQ z&e#(s45DXrK13^kQc+!>$~H#-I)(rT)W8X9NOUfc_U>5-5#K+92|sNBy!RA|D~D8?H1A|RYL%)JU2 z0iho!IKUCe_MURr*v3D#GbT6$`(>*k>?#ufW_1rC?KU#@$Vu4B84K!tXH27U3@FX* zF@$F~@L73kXU{j>QqQfM=-}CHnb+dsa!?|#pT309I2jK)5YBJ@p0T>Xg~NN@3wS&aC4(DlqFD*H`Bqr_tXV`f|$pC!E_uH#V?Tx$NvN|WBnNng?oX50@Xgh`LwafvNh8D zm(7ILWSZqv=2x{^70IyI7ARQmr~2k5%kZN>!3sajefHFGOr|G+UaR~BNdoxW8Nm^Rk>2@Xhxtn@eikYTP!x}^lP#V{oP zLuwl9gSneQW!yGlVKLAB1U4&UBIj_;M_>Rw;tQdEhwk!MIv(*IEj;2&8_TuzPDgwN zRlfA9U}G7Mu@qTYhD%s-OBs%{RB;*Z#!}nMa4(iJl;OTCwWkaxSgN!P@5NGOWq1Hf zRg~euELBOC;c}L&D#Jrr>UbF*&QiuQd<09?mEnpi-&h=i^_EG(Vkzc_*Z3=E{txr# zKV-i71LiGdIBCcHoe!9A{ebzl514QNfcfViFyHY3^PL|suPL`@KD!*3lPpX5<@U^P zEVpOAu-u;cE#)-xTws2WvOmmxQ4sV*wYcz{G8X>PV_jMfgytM=-8Efx(y|oynv+RG zGKtD-PFyWRX|FjcS;}~l)tmkqAR&sxo$8tq%B{oO$c_IF1ghvLp? z+S8yp8CeD&2X2`ISXA*oYFF!WnCsQLU;Y3rswDmZnN+hsE4G5#>u^D9r~JR3?SDvfa! zJ$oCEMHaG@`BUbIclK5$Z3*%ZzjhA`7RFr1A zFs>C%ybMkzf&G!%B8GU)GXawC*^yp=EGwAA+=EMZBmCfkhIu}${w@eYy54HBsADyc zC~w!6G*0j1>xte1TGrHOYR;jB$T=hrhCYiJS+o3#KLc57e~Lt{^!M)>>;1Z5vS9<5 z?;AEC^L@&1eQf4?4Y!AQa1W4{&>l#hw^i7rMWnWzsW}htP@YHcF!xzzdxzGm`PYi= z=i&20!~5cW>BUD`;0XHX&w&c})(uEzFQD@6eNsszw29~~-0etccepI;d7Khjye|lC zV;|mjXhw1m6q9><12n_B&TT);>+=*&vFwQ5`;M=5(Zjr3z0+s#wZ!I#SJ26{jOpn=rA$7g*u;`tUolt7WL5f#l`N(86>DWL=xR)mQ$WYQZ~U2YK3RSywrHL)9;f!7 z2|?1uU1-1`pe285-wf9VSIh6P*1rDPJQ!eSa%Eq&y6+GFQ9{v%@)Y)6M+=ot=g$gy zzRNFDZ6RJTH5Zo=HYv7<{DXXH0qYY)^e)ac(zo~mgD?=1?_oT~f3MC!3&^#)mfwT9 z5E&l%qPSig$x7oJPz~c7q#7NLI~mpJ{ZU62XvXs_q#0R%_N6t$G)`Hp300F4gKHq1jr;W-G{80cEL;>d zyld~vE1ivSu6Kb*DYANI0}es^J0-Mmqtj2&LhEhOfZ*)f`wj;LKgQgWzhRW1;}@g^ zef>ZQ`ub5y&@_rs0%Cg(*pF5Pykur3aX-!fib<07peY~8QB z0CYf1$TzgtS2jYpCjVdcR%0w|6iQ<8n38X$aCy}96F8|Ms>kQ*K-ismZTvhTu+N@7 z9Gz2ajB5Q`!gk57$;GBja#G*JL$Z5gksDYnz$XGA!S$nB-x5vEk;*rAz9`Pl94h*H zQ&?Hrw7}y;c&se(?V|o6tyf{sBDhU8Be_aB_w0y(P8$Ez*3etEO4L;bN}mB92U$b-igEd4|EBH2-)hST%WkT z4SKlYIDKjFfTDiSh`@knOjz_saX=8yXsO`r&rz@xu|G%6Qi}aK8kSP;&oKZ6_unaT z((KPMvWbTMITn^O?$6=Km{(Z#=MXH#smxKZ6j7O@W+_Ewj)tYwl{p5M(p2Uc5oN>h zHB{zU*hFJx4yPyk!pa>N`#jQKh=%j_Mr$X4t*%X$+PT5c;PHe#E#nHS@sgVbe6pYFPmjA!Kt%s z5^Tk=T{+8Mgwtl(i*WiZdlAlNd9rW;Jg8{ELwneb@_ z%rVyGqj`lat$&Xk_SQX^>rmCL%g!~4KHv!|I0gfAz z;9!5p*B1AZPpO0b@nG^X%;~Zr31;STGHBss(QI1_as5Eg**}hs$zbkPRV|m{qhshP z&$wL1M&0i8_#o=`TaV5W*y|CttmI$|lw`=kt;=OzEY-eTMzB=Ja#;XNaaPFWEQPI* zg|igCqT~;L_8*KZx{IfdrTDI%yc{|W!tT3z>KIbR@9N3Rr4v>9uAVxU;=6hB^4K&t zPaR9~-935vY#LCXZLp5BLUMI>z)q*}F@*?$xR;O)lbgRt24Ql^`y()s=KhFq1cdu` z51dFgY0-gX?k2HzywkXxALR5C?Iy8lyi??+A5`=c?q)G}f)f*pd!HCWaht!YfqiEG zTd;((8kWsvoVRP?z+->-9OM15Dpje2!){)&+jCuRV9!26GN7f8kcA`W$9q1C0m*BN zK^FX#-HK?*<22_S2g%bNLz1VReV>**Ild-g07PY14-kR@KfGwABQ4$ab6O{Wa9buI z;l6N+799?MmFGM3ir45Bx#TJ({f;`zm7IS%V-_+|<8`a)zA`Lmp??-H;O;@Ba+~WTNR$NIg|viMiRWLOCv~d>Let=O+U<`7asa&9`b{3$)uy@ zhk-}j=<8Q7@q!}~Hg6ET_$B3tJosHCvoSXG7o_*gxcprW^JG_l!J2;9f8Rwij>tXz z1sVMqbN|c!=n__85e$l@|6^uQG{2}sZQa3saE=6Xca`|h{&yZBbHAnt%>Bz9)4SY& z4}BMYddQ8WIwR$tAWr5mSNIpM)(*%~4}iNm*5fyydh%cl=wlb3dg@q;Kf{xk$fnKk z)UgzQrYCP6$tKS9)YXRN*qm&7+>;H~{Z~%48?V}pUPUaOg9%t_*&Iy7QmJz=jHOo2 z!JJqsZ4M^6%+!)T2f-DS)Wp_o_>sP`*~}>umI%ylw9`pbCWMgP@o2!?rXiJD?V zt2^>P#kVn9o1-;VGnvkcx%p0!Ev3xNq&z29<}))Bw^fWljPpz7aK2kyZ`;5S(tg2Jdh$B# zV2D9rV1BRb5VvHe!cDEnWbjod?TQx^!syl+;w6P4vSmU%mN=}tn(3s)`l|}B6P6!g 
z(`h~WxBaNvE2+60cA3iUP7ux8xGVBAC-6l%y>qkkMX3i+XvTG;gsgkZ)I^G@fZp7x zTGqd%eo;f+*BTvb=QG=LLOufO?CI)mSLf0J6rl0h(e`y-6Voxhwcf@PD5Q6J5O}gu z_eH`6F24csZ*@wn8i0RG?sPgJ@JIaj)MroBYBxE-&aiABI^ElA+CXNzeO+Aph0}sS zINsO*6GowA;6U(lK`FD~1lD`nQh+t)o(1O?B+ffm_dC{S(1gN)FyUWs{#*k&10iR} z-- zYHY3Zh@j42&`wxh8$}=S)V>vu2!fsPh#+v5h4>FrJ}D#2!CGhMFv*A@@C6JyFkIOs zo3=yjsWu;oG#;p-kFMVrYY|1?zqv0qv5DLVVy=nFs(c_;vXu6LSj$qT2V#@yK%{V^ z%}n66(PD-sMWFYGP8-DZUz<202pbthH^y9zORN-$>9pRB)i};3HdW(pEY)0%d$E+I z8uw+XJJmSBQmxf^FP3Vn#sgTYy&4Z@spr+W+(wb``i^Qmluhic#=}`ka}poHQrRbQ z#mFG!b>Kdbghf+q4p}uwIVmD&v@jxwUKnU!0lNaW*!GjS9rK2hH1oha>AmNqJ@cg} z?U^q-Y0rGcNqgoiPueqIb<&>s<0tKzH=eX-zV0N$Jn&A|pR{Mb;iNtDW{UY(&hNpd z`;xFN+CQ951@kZ5nW##(9EgmCfAo0XISGWBkr?eev6iJw@5CmS;=UJiFETlm@z?1b_4en`eRBPdXB5*~lQUH}U^yrQObI_2;#JIDuj94=1ps8V3#UO53#HCikHl zFc=-+!Fju^Q0WC}o0wXQ|e)c5VF>d_4HZb<8QyH^b ziU=1(UWNNP*6D9lI5-w)|0^dq%HFd;s(Lzb4)rdUI6R3S@MmvWs^B}?G1S1mR>>)K*p&9=n5stc61((v z3Sk2E!3XvO6YIc!l!^7ur;Lf!;+tn!Bi!HmDdT4#{PK)zge7gCGHwR7U!G-+^}GcM zCsvJp^N^px_$iFa4bd_f6|}C$^PYK})%4Ao$*Y28Q1)SEbMA&1wZ`=PCAFGI-)t$X20z25gJ{5A#Nb09!d7){aBVmbeum#S zbqxbQL(ecZ9`?hpluUS`EQ+wzNTx_|189A!su@2+UA1Zpev+xV5o)Fd?6(1opW)yD zqIctd#?NqM0Qeb{*I_)IuB%tUvI+e>a2F<8AjWpxVc~&#X%Au9N zf@)O!id18*K8#ikZHjbxFoJ9 zTz$o`nyja)USAsqZibRG&;Zx=^WbJEo>vl9bAzP(44EKOinadj=RtYTL_!lg&ff&K zt$v2X;n{kr7@QH)ZV{&$C8%scN^l%0!Evh+yqLi#0jGbSabwP^(g+V`W-2@<3!kSf z9p%ByOonqZ#XORkn!^7;eg<8r2iz_*JMN(U4BQLCHS#H7XHP?R_O!y2vgdB#XVA8S zpCMvq$0mFj@-uX7>N2d4yDL+G{ zeU<7Vc@)s=LHjBz&fDA!)cu2NzTjurT#fd)%`?#+_o58=w@`**MVLd+paltAHHiO& zH2h*edFwnSoOfL7fp&9g6Z_IexAyZ174uBrz=UxLO&EVFqZe$opit}I;86cc0J$$ps(prMayc*6?)gYu$DW?Y7a6x zTT-LHZh>q2LWpUQ@AtRk;z`jnaep!?VM`99*7Ys)abcgv7I}kf{RPDseck2(rV9Z5 zS|=3@N6@ds@irLyiEZZj+~I&QK0%xeryqtNMl*b$j|{K`dDuKzbt+JdqWo3-W^k|g zt%CLk?g}SNse1ES9RR>KZhiY%H)!lKgkJCRqHbmO~dS4ac?p)crN! z?Lr*xN(W@l((wGNriDKWzV+CZ`>lt~Ojbus$ulgC!)gDEdQ6_8Vo?HZ;~$g9UImCt zgR20W6=Wc0&?#ukN@7}jlB_j}E}$5jr(h}d*gQ2$X~yPhSjsRq&%jc~v3W+8vW(5M zv?i78>UFj#M{%|+hrxqQPs0Mp4*+3}ALm)R1n$Y%__6iqv-Io=tbQha7*BpCVNcFV z4>s)mz=dLNf_HA$T4zR-f<|kW&uwl&} zw{pf~yTH@4$J|>pQ|Ilyth8u@2Lu4T@$U{pTkoAYrNqu0T1PCGWIJ=fnMfPUB1k_T zmm&G1{B$g(ALYm6v1uwl9ZTs|emp*#Hq}qZQu?WWJOP^)35>BJ_^ z@YAuBeuf`U%%;us(*cFcsxM8&@^C@D^A-*MA1F!|}sd?)xv_vkFYr!%p3kQuA< z(|B2ivsC?MnHNi$FUts$C7UkG0$9p&Ste(x*2}VRmTJE&Q?OLWWmz;!ajwWzEQMW> z&0s0~icHN?URPub+exOS#1&a8n;39Kmcdf;D>4mBget%KC4Zg0 zT4&N|ZMe`EE(O$~2I!u3gG&L! 
z`=9+-z7t&vAUTq;odQfcW}u9c!-htt!sd^Ry79s8W6)}><`S!zQ% zwwa}}((@N_waoX0vow!jDPn1!f~6Eo^VBS*UYe(|QY1Rdpjnz{U=s~X^NcKIT$*QL zDa+D4j*fj{N}i(UPU|@1{L6l4f8>RDBB)ufRZ9A^-<*c!I%Cc55bmqlodP`F&|g5G zgT@b&QK@$$W3u{7=tuIyQJT9tm7%{I{X}&*s_?FkHY#YU^@E!6(j#C?F8$-RZIMby zSA;8W1p&z5A7CCWb@?h{bajoX)No#S!cg*gz!L6w!zv{kS5Qql#CgJ6bEc<}gf&!V z6C-)8H%)(mq{t3iZy@u&i2 z)?FmR7tmXDuaF3v```MwkP=}{7DzCbmHi4N_^&x=`S|)*XoivU8)@b{v^D0yU6`NR zE;Fu2n0UeUIDQ6r9?9`DDA!}jF`Tv@Dj!QtW#YL%9b*mrkEPslapbSZSOb5DR9P;j zU5{L6mwB*p zz@H`Q>}K;wRQxT28$ed=YFtyl8akX_45>5o6fC95%u}{$~U@NYfQ0V486Ux>H86XI=O z>d}{kwa;kMNRZ>Zu0dJ0p)6YRH7!(O#APjAbNJDRtriDtinh8EQeYIlz zj5XSiN!vni?rK~&Q}+(m_HOS?FJSpE>u|WXbEeMI)^@q#A6Xk{2GNEz11n)QBX+IV zde97fJ<^OwESuI0{`Br==YnROo(nRYi@TQ4Yh=R~hq&p-PPKXfGcy@ZM_HaRGn4roAA!}58Xy5E)Gd*=yBqoXnbJpv=fH;KI`VJuQSj^kC1D<*W1-;JX_1gB`hBSaS68%lY+GYjALNP2&x1A zZ6_ImT}B^zQxnV;1OGKnvcOoi{G|?VnzRl8V;dO-b_&`^r8rk>e8Kw{W{m!!Fk^g_ zU11J3p<&*zi*5Y3WIVF&@`u1?08L1^U0{0|CMXF9msvuuvC15{|N1%qyz8E+r%ou) zkBt`Vn3dp8U${IZ*{Z|DqR zo*O1*ZrwIMBYeRa)$Lo^*L?6?pN0t*eF^G_atGjz@+}7hs+MR6gy(}gj}_&CwuJkK zI{hNd%l*jRS6AFSdiP9SsCVn4=Yvzb4R4YSZIy*${w*8pIh%mtftUl2!#N6Ymxrnf z2`hl?nh`}6UkiPB&Q3+=zZMc?NdI2-w-C5%rE*X&E6i(Uw?z532=KLO$m2_=*SFZ< zS`Y1bQ+cWn9x>-ypZx$rJLy_Ke#@o7%RU!pPq}hfJ021q+znTDr0v*tPyh+bH@QN> zvEGv*;bm^nMa&BNy8iK{vB4|>MZJujri%@ZQp6}K<>d%Rz8#(6&$j%Q4FS=1AbRi=W z|3Ss87I_Ga<>I9uRDqCi{nKyEdMs?Yv?eo`fAjyzU-`Y{&y^j|H(l$5Y%f#xd1gmY?;DzT+Qu^Ud>%F4k9(c6hkA{KKzbGoVVAed7+5 z93Q%d__CmR!{gg8nzf@NRz5qibK41C%azrcH+h$#KMSIxr%3=c_v?bDSIaKjYCd{7 zSz#TNBL&SbA6_~%TiFph`C5pM4mM*? z&D%V3`981g_?t%-ErufZJa#~l%|7#>#L(sEy|bs>d)>vsShQlBuxIZRZ}PuE^}h5d zH&YrBa}(hcFI096UbZdcjHcuHc~Jw(>hI7IJ?Y*8Zwmu0EF~jWUgTXwf@-$*>XEF? zf`#qxqX%`F$`3x79eUCdeKK;i^RL!UhqhS;FT3uJQ)2SpBmaYw-&E$T@$#x^j7T}- zmmPnv^75$Bi~vSoe^cq6D5scjN`HLGqg>gsy4vSXU6)tUJKsD%=uk&Bi)=iY@@3bL z@Azjvy3%LpiqR2&-mA{c;s5_OqiNW(Q_c-{{0~|?wq?5FE9zH%LF223SB6+?d6!b+ zw8@5Y4baWyERmIBaN1;rp}yr$Mx>OnR8~rvdm){)I_pj4)XBbI7Cb*vaP1#!yN}FS zWfeE*jpn0|F6H@DApC6CKe5d=?T3WM%4KJq9jR`LNckOeu~zu!o9N7&L;kIxptHZa z5|Nqm7xewu0kTb&XsZ9AWK))?7cI9bw3_5M-FewZ@*jL++nYMxtNMrjtEapAZ_vn0x)PD{u&elv z|KWnhkq-DoJ8(!ZLo%yCd6JGVC%Q>pf&PSJ~FDwBFZU2Ke-c)vr3jo0E z=<=6YDR~~_Aw$fA6U%*e<+wsmG_HGmGyQ$LZL)WWNPG~Hd5~A6`d--EHSu2MxS&w# zo6vX&X5>xMqSS3dnPXFBLG#OJUxr{{w^G_&nvU?`CTctbBYhKK@}5_gi7t5Z$H9PvXOMR(VUg8IjWBx!EdZ ziPsLtZ(AeWf@+N}e-pjqWklvPY6U7*)I+*{NE2k;+2TRPAAY$z&n3o zE4J*l^}&_H!PeH2O+%Oc;ssOvi|cO=ni63aZ(8&C=GZS_7Eio+V&}C{wU$eVWHw=J z-jLGjtejVpmPDLvYZzC}udum_16SJ0ffyi;=CD%MB<_IH&aM0@|$1#c=% zldY9CWe*s={iZjSDUl=S$-1d3-bX%KkK{8NCYCAJ498kQtjfk3l3bm`bfJed+CO{(rPJ zdeZHpGt4Jg;(g1%Z;!(N{enc#xp_FENbwcdz38L+0!gUu((&E{dQsimQQ43Fb#Ur* zg{@%)jYl3`da=<+%?r2uPZTuwKub>d$i!>z7wPY0bmp~)%x!`{5QmqctGdp8ko!vs z4$Vy`-&D3N3CIQ(uiwl{xnkqu;3NOO{h4M+d7UXm+!CRTY|60C>Zt2efa_g;5=s$X zUU>6-kxR7QSXe5vih1_pJmi;a18pm_NpGUdZ|>dKWs>bvRM7nV;iVy)%r+h(e!IPO z7T3|*nGrGfs^VuY5%xmEfCM>u2Yeh~dOO{QY4F*>DLzN*sAU!g3k$Bp|DD4gL?qsI zyd1im9s;tk7d#9*y6*QsG6d zr;C!l5#s)^ntCR>N`FY}cU+3d{DVua`mKwp*%GVT4FG{VRNysiH2Rt~otUzX{{*M3Z6W8c>knufDlrnUMwcCFwJ6)ZvGgb}EQC-n8SQaXoPq`M6^4NI{C`hJ;zZ}hvGBWYO%O4#6ADfWXb7pObf=|-&ZPb6H5A|QX zPbPY}b6XbrZ-4xI>Wexd+CTg&igW0gj5)Io)2+F;cOyq>6Cmia8?)r2zeY1DOt|?| z;>#2--xp2YBXsL>jjFuh8ddAbo$=j@E*1%Wdx)}HY>S~oDQ^ybJI%JuI8o4e^x>tC z2NeCfM>w4OanZv)LVxi^DTdZi9siX$QEL#g=}19S_;R!Dapmu;Y)gjv6VMCqd~HjH zm5&d_Un?JNogfR^j!(H(!4mO&4o4Qk;rz+vaKyQs@W%-zoMRaotk|gY%H~8rNH9&s z^*CqY$Y3IyQ}J&Cmy2mfxQH9sR7UcwNJTXC*d7v-hV z1f;E}jvi;5=hCm8aK;V1mar{GFg&N!3jJR?DlyG|;j_KMx6)Zj89}kj&2aiqdak%3>8aXFb;;lyBt853*>o~|j7=x^k^x954N(*a5~>|@RY)f_ zx*DraMyhN&S@_;$)yeizNGB`anLg0TTmR`%_mDj(ootVo!5PYmg64$@iNXOFBraPr 
zktigz>4lr8l6Vvc8~+>85c0sp>xpwzE*`z|D1BSj9U994MOgSu4PCvoJ8Ee3Gc~sh zr{-G+m}lhh8)O!KL+7QBn4#|HxY8|~JMv5a!D1Za4-V<{v$31{P;H%p%KcC!O%G5( z(P*%)~$jjBDIUGvi>L zP&eNUIZGkX5KF}Q96T`vx}I0|5%8iuh{Wkboris7L9qJ+DPV-%+h?5fnC z1cJj6bm4Hi3i^s%#qc!n@S<{IS3wVe8^?46`g_Y?>5!ywo+k)Dx@*tXcM1F6pTmqr zT6aNg(K4Q3LdaBsP?;O$cVU|E@YQW@{kAB&Y*EU#M24u?DxLyjx#H==YcCd$d_Y7W zetbZ%jzSF91ThrRd#Hr-Q{ulL4W;aOmUv$tin}I|D*_z`1>U_kFc` zgAM@A|Gsi~)3pIN@4uIi#qVeC%j^5T=qbWWKZzacc>kX41c36gpY*8F_3r^*ee#Wy zk^|*UoBaok<=l8Y>(3R=XAS{Cxv{kK(BSc3k?Z>bLiyoR{qE0-Qb*&@%0PYoC^aWfAepT-vwoy3&SA$+n@DE|Gu>$9;!EmpDsNMZ#@mXrEWV+ zL8SjTYGarB{);b=op*}uzmkKXSsZMwkk7fV?G@CI+}qCrgzL4E6*rpkC8(%1zv z{yOc<qGVL($~J z0Cm2$=%Xs(O>Rb!^AX_=uKw`PM}&!7QXy%$0Iu2_^8njt7k$jJCDX9JxOYA};}qHb zY@U9qc0;Ss`IEX03ddJ6HpENen(%=DLV&>$&51EKm$qVZH^#%AK8M=5qkx2`Bhl08%+u`d#qcl!J^ah|Q0vutedVY>*&sw$tuBlz(~^<& zanoOfpUC^5Ko*YB9Q91VE!ZxG)1~tvYP)89k!B8xU~v^fw$F?l0(Y7wK&GX$?GzaY3mW<<80m6OpNxd z{^l;W=yi397pEbhYSWhh8u5%dN$nTN|E;)pJM?Zz6d6A73w_9hcIQ~%j)RUjz?~!7 z*0RZVmCPIm{pX9$1bZ8H6bMGrk&X(}wPt_%;=*E236oHf&b`PaEcJ9_63ppZHes2k zFF7z#jbaTYlg1@19Ko7Vt{pUHk+NB+1KfA(+w14ZnFLd_Fq=s*&rvZ6%IiYWHo8R9 z9FCG6nGSjsecFmty5e>lyts*URL&$1?}iBf4d||hSGH0PaPx40)aTASBP?#9)m|8=hp8Ti}MDIgj z<^hSC9Fu|^6P3^P0H})9wEU`d$|E3u{~-BWpkmO=xlW>cAlJnpQv)s z0=@Q}gX?tt;b4ENXd(T2K;l-2^ZK*LqALKM!FsqeJeR>62-_+q-7CHT?_(Q!+aiMq z4n=dr0Bpbj-SD$v)k3(yni)uQ)7hM;@&;F<^0TP@^)|Io zRtX^<)LJ+VeP;!NKG|2qgovmgfEBGOUEj@iD3ok8cJfwq35;hBLMu+HXVai3dOt}M zc+-iZgsmp$8*t^9x~aK>=({GhV5Tk%1=tT$T-8`^1H^{Wq&QWwaw&1#goJph`nc9H zSfF!?i~i_SUYFSh;+WD*hihq2w88P`ZU6{wBv;p{kDBIO=nVl;T7uZa%lfxI2+SOA zh_e9aoy(~JE4L!;Km|-zP=Pyk&j^z`LwB7Reb??;xJg|I&s42;&r~LLsg6EzrgxXX zsb}dX@b42l!3UYoAIcp4m@OxM+X-jL0B(G2K3^bplC+P3(_GIT;UvLOgPu7XEz-;8 z#w2gmNI|m`r1;R{-lb3^x6f&XL7(`A0wJ4kwV*`51N;(*qtyL}SAS@Zg`dg9qKykF-r16?h=;)U5$(*|4G#RwE>DwehkK0< zdUbsfeG0ONgx52kKFDH}W1nxJku5Q@mXI^06cW9KEfpf+^%#AF}3B_5;$9%)Sla+_T0AGb6aZ9tWk=XWyXKFJ-6-l+}7K3t8lUB zcBDPG9roO|+H-4kLENHqhz7vx7-%j^+$wV$EejZAE+R`6@yz*bH-4rg!<5ENuV4ow zMxv83{W>uO%V&Bb{1g$u6ymt+jclP8FjnF=mOg7F*ZgkD zgp+75xu88U1WDhtH!Aw*JJ3SJ0S@krH_=h2as4-vPK)Koggl>Yf{Gm;WBax?bJ|AJ zKP~shmhfI*gyS|@@U|8=6~YH?#~=uU3gLq`{^6EA%~lvr=8RV=(5botIJG1ryT{Tk z3#s^B?H)+ZkkOiA4#BcJX-$bf1Isa+Lh)Bi?1PB#(Cew8^hhU;_ z={hpb5XAgsn~HL}>kWn9GV+%p;YBf}?u1}crd076MRH*=U5DWe5uHfaVd$<`d`7>R zza9pTsp1g~98<`6hFHiHB6{eJOd*n=2_wgpdJ%#dm{OKAL}3lRk}>w>uP8N;d*D1} zdk^|X1#TkX=-$LWUYvp7QZd<`h0Hn0uardOxE3>W-LfxyGka~3y3-j7zfvk9Ye?H! 
z6mGl-;)SRpAx3^+1t1qvtCL%*M7*KFwW0>TC}q4|PQ3;}un@ODQGEOZTGzSJ>={bS zV1AhQGVCf0dAHdm?)0fIeUrqp5W;PsDKs@F-Zq6obo)VtyI~65Y*dq5Wgc^B36Phb z*KCH9m_vCWn=>?wXj+EwhePeHsP;r93mx7h+4}QtHp)#Y9J8|qG=I9DK0;RSvX3Z$ zk4imiLa{IogU|?BP(3dWg#%wFA?GWLbEs2i03$99@DRkoA+^XrcoBjar>_gmmdHcC z>ut_C+Zat(+$@KatocVAMVh;JDTspcOq|oYB)sa&Pxpkc}%J3LD=RvIF*L*3kDUQA;!_wO&nCZTf=0VJO~pEqbpaOC%jtex{1Mhje%|v z|FT4f z;DU$-b;)v1U+AKRg>yLkIRWKp;1~W31K%p7WzXoGDu=b0^@MG@Jb)aJ_ojn;beLFV z<_tM=I5JDF7%yisdDS9d<_!p#c^&sQV9n>Y3OB-WLW@mNjhc`3nINn$96^TaxW2S1 z@OwDFUIYPf`a*N?yi{5hNQp7D<42gLccOE>zNcwb2&uoQ@wl*)J{X+1)#yvNx>?Si ziX0+=Q;}c7LQb&<%4>bkqKS8+7$)AcXyTn1>gAyIMbU3~t3Yyy75G_LPO8eG4dD)H z&;}dgL(M=eZ}oS*X8el?wWry4woN{M;_1VYd~Qxx?<6dAAjnxzp|$ zXRjS9bAmTI{kZdAU^pIbmKvoGnN4KjBE|gGgD04!pwmWa{V+GLDsl_#KHj;*mf}&n zlsP2%ov3!)+s0tiaJWON8%{iwPvB6H%pJORDFw+~UmL6)0f~FP>+~FeWPS#H*fc2s zdn%uxfSj8icWUME_&<1Dtm=V1mp5w?7mR5b*i<`GFgPx)aHjwcZ_f}{<+t1^V}v8z=|TI)yFHd= z>aMq-hUua0OK=G@7@A*{3Z^HNM@33@1n+wbPNPE9jcf0+CAcqB%J_>?!WG1kq^Dt5HDvz%I49-6Xe6j=r;bQw2!1=`As=Tm`2N7`)^8; zFkhyRW;cjTicjfELVN^*IkaBZFU8%MQt|^zHCyPs$fRTn4fZj+4`iqzK7!w8(^WJt z#RFE+FV-ATRy0uWB_X<<@`8vvh$%J9M{u4g)wUE5XG$3lDBG>2P@z9W+)$=agpVL5 z+-5rT(kvsS?w!T4=IAc!KJ*~W2h|hR#R>U}C99E1rse=;lA&%~T^NGZ2lpH4LvI9$ z6uqG#`_=)*r1;kmtYM<N#T1zuDQQ>tD!e6LbSaL zzcCye=H*`cErmntu?1fc6AAZ+trn{%KE|sMkYzE9f4eK$pF!1ZSWYZKvEj9V^Ep97 z<9w=BC%NSyos~#B2Xed8(rJ1tIt0?$=#3;}^h8EFU-?*|cJ&gdo#;APkDCK3mxekM5<~kJU z1)$2^9eM#$Ijmdfc|yYgn%s87p;%ij^7;vm#*a=aV>-VAb-rL3bbb}p`8^NQ_At^p zISf;u4_d`^z9C&t{FNPfnCbjwVc7ghbmyyD5bCUMrVqfsMfPP(x-%tb6~~hu(-L9L zu##tT?71I|x)rFEM;*dst#!yG7M9RjXEl8mbI*xv03f7jF{fHvE<8BI_SM0wl|`YtdnkCc19r zYmrvHghBkc?<3GNm6t8Y@k4ZrjMYjNTk4HSt6WC6=#dYLG|TysG0)0>)T(s9JfZEp zJ4BJHzr-RK*Rr`VuI;0zfC+Xa0KaC8Xd&C6Z}jZ~ueQ;PPVNO$?}HpL^-k#=Z8OI% zGyKTF%$k2xPURh;t5ueYV4tW$pOj^mA5FBi7vIN?;T>f%{REjNw8KDby47{Y6j-rj zPf?EynSguRw1E_5`95!kNR_KVVC5bOj5cUiwy#3*jF2|W$Y->HIIC;|Z5ZVMLN`z8 zVbg|uQ+J2TRP`0sL9u{t59eoYe_FneFD?|j7w&)+*L+kyDZ#@;rt-B}VDMEV2ynxF z?o2F+LYLi+5WqL(r~v_-`v$s%AGZhEV71xHk1E3}n}fNtwuCosr|Z766h**D^pJ2q z``c7yFztKD1m>}{5cNk^l^$k9+?c0V+{^y_7>GD&z1;>dV(F7<887}vgJNud4>I1E z_z^AR4&O_fwda!^|3Wg}$QvhKc>rYG2<^^$=de_-|XqJ;@%IgWe0GzMLaq%pC- zEx`7-VtSXD76zEIu^|vF&?wLYrmUPiwo{z0S2g*vQ7_b+^;Fagi`_HhW-Fp>tKBoA zK(BR7*IU}{o@om7E$|F~W%tZdpr=A!X#LoCD7M(pWU2Xg>`(+Jn<|0rn#F{m%&yh| z+jh)*;=dck&8E^1b zd@)m31zW*|xB3dI0uYp}x_`3cT}YhzO3^>Ns<^k|3%E{4E*l+L3|WsN`>#VU!O`O0 z73g_(r?T{m7SUiyOhIu{mH0TeuQR*2w-J)YR*7r-=lZ1fx}HBX_qh2BL$jP14q*sT zQoWczz`9LSNMeU+YQguEQ40{XP2y^7d`2x12>#9fnC+YWgq?XQ>&(Fjtm}8-}@ADN6rl z55qjz#JR&TFP2gd!@OB4aTw;yQuBvl-B~Jm7)FFikQaf7uqpvZq8^@MRRXa2bsOuC zK%$X_j>G?3dP@$LHVY|7_dze>G+?Vyb4WiFec{6;V9>ClYFmR^FMOzWn=I9{=DzSv zHcJKniSuOI5>0I2$CetU2yeQLs`^j)CWr`ZJ_8MKoH!3`BF$3?=aNt=BJd@zQZn3d z5uXU7)qVy|JU94*ZB4}a%~EyA3wb^hEkyNzz+cE|cgTzfcJ}B)(huy>hozlz>@|}$ z0}8T~<^kcwQe^%Ef?&zQ2Sfl%6+a;4EM<5=gtJuX1A_}c8?JZSrpaSc9ZTttNqISJ z+Ht9lrS!+8yj&)Y zujgwlu)^Hy1$StFD)DCS(EcPcKRoYRdK$iXd%!8XWxd00U91Gfu#R-+2XH<3b|I|2 zTyCInYRw_&M*d$j7&%j%)@Oj6k-9ZV(6ocsF({OVC+hA1LaFY~M_-stf58_9nF~3R zr6Wquz>ID{vea#EEiEY=zOTz8^p5-J9l2LD^gHT^)B1nfLf8Ply1#9o_%Qixi?z#0 zei*>D?o;$Y68oI0ps5f^Z2LJRvGb9{woTJG%Z5-A`!`te3|H{^Ah7s(B(Mv2523Bp z`t8z+6;LL1_iw8ubO>w94uqv^KB?wJzo+6Y_;+LCEf6=A?c`?dr@gx|x0)3nh zy1}_+2r^e2oNo`I7b1TRvzn`^h*$5W_d~GO5L&f3PY9f)uqR+gX43E{1i?~XPly1P zBAyU(mI`=6gf9h5b*s@DREaTKPaaWmhJc|&c5jHd*T^}gSJ+6Ph*NqsODRt2H7un* zr8lsY=9J#(^*j9mHJs9ObhM3jF7jSmjm-0h;^XVjXlT>*(xG6wZVrVNm|`R>VaTL< z=j&ilPMc@g^<-tO3E|lP?kqQM`m>W-rJ+1YP0lx%>Q)YA%-0!11IBy(&eUv#nrYkZ zT*HNI2ZETt9*9(_y@M8WBhkz^gDUY(B2D6ssG&6}#QdMe7*9zZMTh 
z*u@0GE*{~5;Fe*DC^=iJaOrw_Fh?o8<6UYZlkj{CPx!ZUBshs|yHjPBL+;eEN706D z%OYHK$@zzF9Z7hbsHF6q9quaez-rlkG|=uEf8#oNFNjOM^|-|D7JpTD$;1jD!GZyH zP_Cw59BR9^iDcw``#LQTtj6tk_H|~jm&ekUX>FAQ_l*0?{RmQay*D})w5|;Jx0=vZ zkGNm{y+A&p0`YI?9=*=Z>v%FmVKnU&Awzighp@S`o@d&#UT#R{d=G{Nay`r#y!FI` zkO>wj<~*rA*W8^laU0JBb2mU@Y0&XJOZNkUC$M2PpO44dLnaVC;02GL=ioiS@(qs- z=DFC@kO?q}x;wWyJ?rxK8O15n?V1gvUqGVe;)YQ4Kk-b~bSAadvX2Yq?8Y;hf{)>@ z{Ccw254*Tbxc*n&v9Yih?2+5Flml4Guh9{Z#lNek^M8wTjvENODd!=yar>S!)x@3o zi-?N_w*E~7wf}8_P$rWSmXS2rlnzGlC)L zt-82(t4*<5$B%;e7q90Tbp9`*ECpik)GC?RIR!-LDtM0tb~~rIFFU7L90>mg{(qgF zc|a3a+vqbBn86GrAs|Z9 z8*VTlNEFe4R-+(B#hNPBC@xV^gQ7-7`OXB|x4qwc?;n?c!c1l|XP)hxdFK3{(*P#K ze3=a@{snjDID<3DPnUmwrgu`EKb|W+?Ggp5xwI@GD^dzren4kxpcO8+A{+Y6!bOZ({cu;IKw-EM3p-(T%-<{ zTov%oeMYj3IiHhhyJ(;YLFMKkP=w&tJQFzP%C1GAAi%P;Uok*hhP}A)?j)cAx7_$Sn}Gf| z2`&Ezk=;Bbt*HB)d*7V0pA(U$?cQFiFblIoM?Gd1U!IR%VOI^qU z1_!`lL*yHKK*x|}S2)8*_JDF7r)F_Nz}Ho2dOD!A0fAFh`l&K;1UUWqfghNZ6F@4< zviD@-+HH;=VUym}fZ_pHCj2)HXz;9i>C%4-@9o{`KvcJeWb^D8zaPN(*BN{0KtR4< z_$&oX|D97x*iQOPc!E3P*LFJk%eTBA@+F~6!1NAO^DU2FN)g4)S+VF&-}=94{whvLH~$1+smebix;gje zp)YUDDE(Yp3LhxD4$cS`szdU3sRhK0qprjB8bR-2>ma*u{k`=#-Kj9Rh{%N zc#8$NCv_dChuH#cPU)s3!T#oV*()1CJNJ+j@r_!-URa>l_7Tip4{qGzDM%cd`(5;v z=kN9qb~&p6^nBOpZ~VUFrtH6BXvijwT@DX!?D#`Jhm^>q8-UcSsk3VaT-G+aguIl% zp>UqGo0S_+!da;|zWZeDp~|DP0q(uWev|*gpqrOx`;(;Rzjk_w>iv31&5qjLb|9Wp zeDA`Bzp(kAQ_Gw-^!WM>^obp!b>EtA(N+BgN*iiFbp)c7_EtqUho{#3ef+JjyJJZ& zl99U^zG#QbUo>HxF7{4MWY0Z${bhH@ULAb=AIcNdnmCpE80q2ZC*7+#^dIBtc>8TK zaKRCN|0;|Tw&*eA81Saf%CXMe^b?mPDIXzcs{_yffKhWuryXDh` z)UUc-2#}Ur7(FEXq$f4A_RPg2w>!G=C*At|8}i=~#H!tVH#>J25PfDBxPxxBw499P zBt-#oFLt{9W7aPF$0Gi!Zd(ioMv;}ap%*{|zoZ_a{T-J<@rZYg#%^QsAO7mbL9hP< zOq}^@%w*C`eG))DfbM{002rrFL=SWqz57i>>Q+V%bT{eA=~;Jb>+Lbz2~gROM!5Hy z1OV8G?(#CtA79+qk=BjaKk&~?dr9xXyeB=cjKy7^J9@z@d~oAV$3F5&Z%!l*>GAx^ z7dH+n|K)O|$K@C1{G0E;=wt7x{#h>~`pUiKO~xM75TMsXuKs7^7Kw6@5a0+B0#r*W zGvcO79P}SaP)8{5O85&WL%RVp%clf@lj(2U*`0|IB(#$7x(&JS9S1t z5`&{;fIamThf(b>0Yl_ONoRPBYHSS|Uo6lI99E~hAFzW##G8aj$T`aTxDGb_1EJ#s z;0R{QZ5eh7>S7LwtqkRfWPnVBrEyyv6X`eUfWjfMxXz`7*?zq}bdtj%mXhoZWw)KZ zc>)L?iZA6!4|sEu+k@h$A0^cJI!mSQ1UO(d#p_W* z-NPL;R>BpjdH*2NzI+W>9I5%N{kFaU`UetN77_XzN7wK4i@rUGPK6T%v{or=ZX)~1 z$-6^(P#$Ah58PuLOrkv6Qc`U2WN^BIxh=&G48`VjlYTA9-Rz$nHc!ZLTq~hK@w#ME z5`vfm`5GEEjPLF2^dj!>slX%^uUA{<)pGHVmqN^ zmXOM}uCcc|#JOA9Ot5KJwt@@xkjhr(*hSad`QOTZRZS9a@9pB$RZ|wT=f$(SVLi%Q zQqzTZNr7J{IV8y{jJsE`hQi!7M;=))`>uyOByes&p#|ibqS|pZ#pzs)SO`nKT2dnc zQD#iT)>D|s^67El%%gWy0DX%5(|*cR`zg}yQ%>4XX-uty9CD=YnJ3H?RiuCxGF?Wg z6EH__6kDhQCQ>Y+_lftgtYcp&KfGXzFzvZ~P>frS#4G12l^PP76C$Jzl2E^%pY!!p z>fnT&Pz4pt^A0Pa4%1P?4b~8z=vOhs1MkCH~@pBuS9F zDDsm6NtPmW6OdFhvPg?8&?7P?x>$lPjYF5q&z!}wUG7ReWW~N&OjE)3G+k!d~ zMag%fwxhY}3V4gKhzK_dN5%NlCeGbn-*>`3X=+R0(y8-k)4bG?{qvOXi(s!phw5QF&J?IhFEyPDve`fI0d|u1>{mgnUj%`Lrl~^id*nB+pXEvqtipZ>i${##xIF z(AM+DHk@dvr$}t|+Sb!%=SGUl1%>l@t@CY#ON+q0S>W<}0{x+aZZ^9-kPIfS(PFv>a#&jAP$^AwpS}#8r?&5ysmWt|xD1{Rj)B$}<-B(nZfhh=RHPd83 z<;>AD#i76&)T&Ysu2kYQD)@VB6EKM4j3bU?C{EOP-A(u#2=R?ppA#~7Pt$O7$(Zr5 zd9^R)?{mzqya(-m68OA2PdjE+A-Xbe z%*uj`OsD6OC1Wz(+uC_+G8>(;&Z8Tf)meQ2IsK-{6rIiOP?|1MM_=D)HjMgnOfLWT z+_^3D&}C9|34tus@Nc6?q5zpKMLu5JuJi^fmhV%tGe`rZpy>iM>^eVNz-TwqU&!bd zf-l$ef2Lq%dfJgV+5szHE1~U^(MmM5!vfkdGp$^J9hYKXTxq_=XjKAMt-wxdu`B{R zZN$!4F*C(4Q|h;Z39{()hjH}B0>70KzYGoi89{He(ydzmH%ykGogx2{z<3|$_cp<= zL&5l!0M2RPXoGHNW}6{pL*SH*bqa%6-2D zbsnj{o=aRkW%oT6MS3oiCwi_>dak_hDZl2KRpgoJ%GUa__ikpFM6&lMvJbYiON-b? 
z>e$E9`_13dFQua2!t4Fg9(<2Ud?`H<;=sy6rwX8UTS}b|30vzE0m2?W@tpKppKqJz z&NUra*7775^4bP@8|#2`v9*yt01g74R0(lTD>-s70kcubZMzAFll>NWAqQCJoldK` z$%jiCE)x@;>#kx{i5E2(#G7akc&J~^rw#!jhsgO_2BJ<*m?meamRdW_ibNN4lZX&! zW*+42)MRCXxB>!9T%QS3<|cm6UO4UXvKd9lf`qJv6eQ7zWK?Nca02L<ncJTTcw*rF5Og(b5eBoFThXG_4@)#J*Frr z&Z|eK3q}`qDg*pH{;4DNN;)0@o8GW(KAiI&17b5_PikRTnYf;%qh(dV_mbt+kN?OBKa=I zLsyl1j>|S(%;wrQ3wEuU`JD%nV0|>^;yUG81i)N#B*;8HqIG32&>}y#dnW5Wvmx|A zI(v~B*%!}F6|naf@fR!Dhg{JmTK0+r(zCJ%bd?6pFv?Lmh4!<6wwFo!6i+)KB|YV! zl|~S>V?{L5hRn31z)hH>*Hpw|rS0rmGp5sGH5%+Rg;O3$e`LifAv%Z!z-8_OH%Xv( zrqinuIZw58i-7UQNPk6Pyks(}m7KQ;4ANa>j81~_*~tG=#5h^Rk$$PN8=r$ts$S|00zc@a$ll7{N#R)*#5+*x?xBX5ATpCy zhusKexQ}(4J9FxH$(&m&`Cj(OdJIgKd&DoVE*4Ods~0hLq_qKQ?GUXEwPWGI3({4u+RAI%eLQCeJjjUGJ*ST zT9aOkZs~XVaA~OiA;r-1Qwel<=s)`Ro-WGC5kK<;=_L#nHIj=V>qWq8$Y+$PB9L7mTCi1#d#_gKk$ zUB`O~@jpiL-zoV~_53eT-#+7_`h2766Z4=?+=BqUU*9S9eSb{qJGHp)Jh)#{RKGM; zzm)oZ__RGpSoO9&h}8*l!gEWI9O!X%uS3W+2#(CFZaIu7pl{lm;czEKq%@~T9!}I^ zAQA~ZSV8Txjv5yCIKi8`#(C!;S))0Os-_nY)$}`DF`$t?D7AIyD=KyOG|&%Egz(a% z$UJCvb?;-yMabFe2p&wpAa}-s$o@E^0+HgvGK4{k3M{A()ZA2MGx0MP#6u3E!_sJU zS*UZLcyQMq7m|N<+EQ}pl&BHZ*lExW5>UTlf}tE$l8`faeoly#Iye*rQtHqQYA6V* zChBmYXMN5v12x{s5dd@%U??JVoaE&=CDd`U8rAwt@s+Kek^zQjYgcO5u9}+rT|(~n zGRLWE#~+BbKWKBO$%)+Q`rJ(Y+AOalj_u}$ssU~Azz)^GP8cjr6Pia23wNDDe=Hfh z?c15&2hyhpfaEi>2Sw(~kYo^n5|A__vWP%Z^vF^Px{Qg+1ZbufU9Lb^XwV`GEz68% zThVP+2v-0{NB^tuJD(RdH!cgvO${wJIOl3Zc13vsT5UWeTlD#Q6f&B#Mpe>wQE_SA5A6WI4^|D04#7z0k!%{LK?& zl)RwV&XQicbiE1^>={c7p8$JUE*!pgm{3L$np6swHJs-|Km=3~`hu8bNxS%94? z!D_&S76VobW~G3+88*!5coQmTbVM`WPG!8C&v>7~u&Egz3K$5x;EpXj*gx3M|Ennf z!AbsKtNg=?{fE{27vy(36|7!YkT*6TKXx1$0nNd8UPk!xK?5F68t{1W0LzvE&ngDA zT_5oB^#Cht;OjvHJ0=Z$SNtb-=HZ4jZP?k44QIg;r>pY}dCr%%ZDp3m=WWPEPU{|u_MKg|Eq z#^~y}`glYaHEIU_p%RIZ$3HoC4Mi3^S{y-9nTOHtv!;ULJKE^OCB14y;yb4X znH%+=^aAG#QfGtNQ-9ytC~#>`a4~7YGYkEp*~JW=tibb?fbmTSc)-#!IujT+GeZi1 z%|!R;BKH`QF6=!evG>%X-ZSp^PVn`ZLP+bur#8&)_@#=db0u+4+>a1DPZ~HfQ&5xtH8kXbw3%NeHl#N|F7D`n=T`} z+-q`%o!mR@^o?O>-V8IlMLci|z~XTFA;h!#WdkzJQT|1I8d-RL&B8_`wRue{MM;jW zT9C9p(P@?oY1VHR2`9F4us8OHk6_-zcIl%;-V-J7sp}8D2akPLp6tE+*u^X(npwFv z^E8@uer;Cs+FRN0()pwf*Y(&is#ZjLtyZh^oYrkquPby~uZ~<_VpvZ^Z_pX!8}gvM z^G@CdL!L2uqsaiCQ@kIlH?=wCYa>B{w|vFe0`;YWZO|{<*8Nhr&RaWnXUV0V#MoWB zOS|?#h3D55Hm+kE$L=;=+HD^DtK|~At=FE8b$f`qJ&JK9>dPgXafb>nAJUHV*|+}i zQQ8qxYi4VRA^FZ(pFKPGRXm6{_SnF!^<4+qJP8-M9ni^`)Lw1+hz_pG)Yil^Gh%u-ex!qo4th=O* zxm?nCnTP@F(Q>*SM_1QqjJaZJykd@NvNUqKeMc{^yQ-LAQeQD?Cj8N%?Diji$(7s0 zggd$`ck~llI+b=8auJ%Y{Ar$W*K*~qb%L%D_j{=E_&vi}D z^%L6+O>Ns?yDM!9G*5hKX?kg$_{!E4*h#m6@^adGnL#x%uiBYi!qfRaa-eNMRHiqr_&FxCI0ktN*o;{SRFiZob~X{CZH`B+;eoqN*)HH>sn=H@ArYWDR(< zWxxy8z_(ilerAQ{ZVlb&7PftB*lxF>`?n4~<~FQq>#$n4@ZYwEU%4^dIC=Q}oZ)}E zMLgOX@xpDy+pQ!1IESC8LcU=>%9&oCOB-V%v`7-5x3X zccdKiSkOKaZ*NAtJV$iwSoGN=BNrc`zLl~0`<1`kTB$rgLR-zgiJXIQPaU#DE?|{= z8ImL$ATd(qRp>CzlqA~#4M0wMS(1ni&_JZt%a&x_0EHs=z0|WMo1ie{kypWN*(PWx z^3}8v6&4;29 zUvKps$rflV($~9S4wJzO@Yc@JY*CIwg1on_9*Yk;i7b)(x8u-!q{v%6PqH&|T!}+6 zvfsO4o@^(Sf*kTLnMdq|79hvGb@Qw{p@m4fw_%=S7nF)rd7I|Rc0p;#NpH(MVi!mz z)_U9KS$9Dh$bE11Pm*7umB=ISf}do+LaUIc-X%W~ze1VF3vb;|)?cA4q}|)_lVlGx z*7%c4+#Y2%^4{D06TuQ~#)YR4xjZt)kSsX}?M9ZdP02>Li1dTdugFSJ)}1&A?Lo5H zwq)x;s01lut5YP0p+m@ic0r2lFmxC>#4br84ns$fV{BcD^)Pf4DQ6o}BuAiQNEO?Z zB0B<=A}86F6ygX}hSaicDb^#94!O@(FOZZ%Cy__&f(5ctDEbuglwGobD1~Z}7i`@E zYbkUZX=fW2NXnpE@4&J`M?25>9bH5@|j~>WIYQx zp&_pIN+%RlPL{>h*G<*r;<=}h0(sr|lEuVjCqE5~EdI z(_&czGypxxwJatYpn+&D*S6T&0EMFWx#}g7CMXPj#4T7NYl4QNPq`&ah$d(l`hu%l zVr_!L(RQw3iR3CY9DU0*EsG`jt^gB=*deX6%tU|2z}&JutIhhT7^FKDOo|>g)-3>KDrgwyHFO|?qgUX zxd&yVZ+%QFWcQ$-(GNbB6~sMAj(+yBt+3vM)Tl2{ogsMutwa0r3NmC5p!H}VuOx$b 
z0Bu0UV|cm@>jNkc4dxj#B(2a!bTH49A!~&;pP*Q?=p_1xSLiscAXD}dI)y&vm1Ghx zp&IlBPnT(Z37tmUd4^2ME2tKI%QI!lUO{Kj4?IgI@d`SNe&*RSt*;;h>dRMWNnS&j z(7ybFEZJ-5G8)J)$s%4u4XBu}%d)>l?^~Ci2zUl6TM_=v;n5w(K2r8%^eyWE1b8J7_9jmu-CqwV?A{-z!_tC46Ic z-22FBCAoOK9^B7S$4}@BzD{oa0(GJwW6;hw$R%CSXY?)KB$stTU(gSHi=60!y3o&j zo7~z3InjKDYOO>C_oDR`7HDNEm_`c}mS_nTjM2nGoz|*?ooT^BgI1CQ7I7FXG-+iy zFr5}Av}lPOm_ZvZv}vt5uoo>+sFoK=*1+Dhxx#`X*&3KlOBR+C5o=%$Emf#1vaW%- zv~(en`;W(;MUdzJ#}kk$h*l=l7c)1gM6}~VV{zODm6%p7G#B^PY)}oL)d;P{#to{0 zw6j9RUgjoMD6Lhf*&DY>6-Ij^)b7>bpUg;KdDINcULqeJLHjJU?G<<~Y0rnFX}*E# zeUdHkSX$q}f_<_r@HkpvV97pW3p}1C4%F?lZh>QH!GVT-lCAIr+TcLbKG{}yA}uV? zvX9sb$I^xe+V)wu!c%F9f$IH|9qKUN1kKrTV@ z6+z}>nrhWuT4s>-n6X-Qk0uXNlrm4LEVR8rn$oyas;9JrLE2KyDb+LDkszYfcuMu0 zRu-f$W!9+LXvc$$rExW?7qsdibE&3A^^#T-WGyw;s9w>|1}VyzXH+&?YmlZa?u_aK z?MaZfOmjx{k@h@@C^Mc>eWJaR2kFb0XH}iFH$ld-xU;IywD&>gGR;}l7uu&FYnkz^ zs*BbYq#&5}Dkm&6xE}6>^%WHmvU+${ha(LO6qOJ}J&a*uk&dv|!_HW+$UsQW!7kWf zk%^FWVBNfnBkRd@up z@7h(>2&_w_sAOJ`N-kIvjrof;m2uZqW3d3Swo-FlH4Y0B7a}S9)}$Oo7F4cTKvWv9 ztHxsk#rjI-4OI;GmDpGrcSAJ+cYKWSLY^^fhQq98TV#NvOZPk2iuUK;;?zSo!J1Ewk z(A-v~U`NEn3FB?m0<27|Kf%1CT8JGN8&AaDQKe$lV)F^j9aS1uBeu#<81JYSVfV$I zCn$GS8Ca`WQyq6#wGw+G)>doos#amo#YDC7t|}9ICDvCn@2Rq|H)3OT+&xt`_FinR z*4$J5jC~SYtM!wyp#m?5Vu{8--s8ZvLUcs>P9X>l9yW+vk+u(tjH2U&x^ex@M2GalFv|9RBJ|` z3TQ{Qv?CHSm9tMlJ0zo3#$m@m0>?}{F2KscyOab>r@^ZASdA95B+#E&u?I}LSxSE- zp}(du+9ixOg8s}*e`Tb9B;<_FIL13E0aA&Jd_B`6 zJ(t9LW~6&AFY;Vj?wL)d71?rEc5x(oZz6ktI=f^u8y#9j83Nu{;u{r6l)Ur*{ZO1p zUIElFk@Nh_r03<8ZKz21MAfq@S zi%e*W&L@cs#iGUsBBP%e&}*h*amxd-*>AwZqyd)V0c{TkSp5ceBn`9`5A1v}Q0gB7 z>IFpa4T)(DiSr*kW%l5ydk4>;x>tu;kWwBi?+qi=KA)y{bWY59?I~&p{sBR`RDo9v z?uDWi%*Yd!b+5q`nK2I3_)YcnLi{$04x-kXkK+s{^a9MZ;kTU7FlZO<+6xuImHzmM zUZ?~f^$nih3-yN0H}Je(sGLbjcp@c##VtO*q`&gOL7YQFr5x$OSaRq1=*jc_po;#H z-5X0Ruk~5@avJ&_tZ2uZXlN7tK?8CL%wL)%Y0_S%KEn@V=tx)n!Y(~|g8w9HNX`~I zw-x__p}w%T5%+aQqu~S|zQ7r5yD!H#Iimw%O&5OF86692!f=`kT1dC9VF}0^Pal3b zLa`CAbU}ld2|I@qT+AV70d`f4 zqTh5LI#x3=246!*XEN)~1}Mn}4crtz;^R=}Rs0Pd9l?j*OYD33rq-AX{lw#cvrYIm z_~$yx&mFehCb}7qXQ2IINgKm;-_CEB4osrF0gWDmz6+gPbcy-`FJYnI!kXuJ7YpUV z(o~%9hOU<<^s`F~EHAKrgyilByOhXDIs4kFXFI74;7VsjuGeEujnzMou{UlrbJW%Y z4oMC0Fq?{r6p!bdmZp}o=m)JWn1KDReIypJ{8+uqzL=7ijj zD6uPsRIE9!F3RM7Jw`q%SyFfQ0e+!3Is&GY;;082$=J8wF4))^u@i#c>aFB;O{-qvDCW63Vfs|8pS+v7a`S6lNy?Y1^7qO8$jW++3%${qROP`6TV)bsTgo-HG0p6eqAiKbG_W?!`s(Vd5Bmh>M=3 zCxqM06X+|K&d9@;`=CkOya8;xvR!*ePd?NkOWd#9M<2%9}3Vs2rS^@i+aI^*Z2lslt+!+ zzXCt%i^fx>&wY3J0r|>y;@|n93Rv+PH~FDbs1Wz|M+3#VAE;OC=Dd02;E_dLR2A-E z0NW#=jTJ1(PPTpiuU%FBBNC3`i~P}Wp8H`2sm`v}scJul@oS{#80QH0^%0sQ_$7ao zGawwvzud!E>cY!Ao3!lcead7bTj@(3X|2vtT|CQs}Cw=sy|Xm0Lq6Scj89_(0;7;cyc`l`y5loBukJ|TZTUjKm%au zF5E?ka^csX@gN~O2^KJLxe(k)G|a-wh3KGRaYMR;i#hstj-V$(Mo650ZnovDXX(ay zkSZSWd%zxR3G2WRG+b2duKBD ze&$s6(xv*ki+V6M@AAM!Msk7OFr8Tmz3fTg~6Y;XEN!TX2upE3suS{}*m1qliAq*Voina!$aSp|a0EvjdE1%f}|&({TltG86o zb{7cJZ>ydihHI^=XD8tr{P`KKwW*$6EfB2#Sw)?lFPLyaWxp_A(0W0IPhFWWNWZB1 zDl1Q#H6<_i+8suXYY1%F*tB{2#F zHyTu-eF_9CuBqGx76_&^s;FL2VWX=>i)znG3f~s0|(nIzqpEjr~2Xe{|2`a`j3n>(e13h+2 zdRmZ=V}sa=N)2_RP}~_-6orJsc)XGwM8D`*>r0<3Op2lKSqw?Tq)XB;nO5(m?4oaJ zP8)~EIz&zv&z6=}arEwtCe9hD6&u$})_kzOGuE+;UZWDrWqjVs zzgE`T^J#ru5?L2bXJlApViiZ;pHar%V4u(H!(u7qF4{U_R4IAD@rf#B$GU{O392|7 zc{zG78s0U0SVL3o!(#Y#7pmQs$Eh#b-u8TyH5-* ziKducTEq2QB8=h}7^}kuRLi`vLxW?*#nQKYPnjX`dmE{aGO#S04(633b`zQc8cY%HdMd zj%#^2l$wLqVU$RRT#9r{qOi}FPtyjK@t0rXV3lRrs{Yh6Oun9J{CA!;{9$}m-$+oD&7oVIw56h=hbJ9-UnU6KvK(W$vtjG%gEROm~ znYE+!OyYA@>etvo^vL->Z1L!A?oV#6pVl96MuCxogXQVR5pd;4UkIZA+)? 
zDV$W_mfd1J#d`hxYJ*6`WjDx%(B8YgT3DnDVhjyd1m)c2`3SgNI!(x}(`j;@snTA` zgxq_SLW#LN-xF72f9_6Dbn$$GQ9=KjOR~uK3yb(;K%hN{4(Ck*!F7_Ixz36V5Mcbt zBL#Gx$~|^_5~$Az`>sneX+&y~4B3Z{$@!D8m()>qfdi|M1J8t{y#)7CihcJaD4r7n zyeqQeNvTB!B(Qf39nPB^P}ONcK(QJ*@FJ2FpbS^up9&7VB$(1X8caI_IVqX^}wESUQ|HInYvP00G5%>zgm);Vr`hORk*i>ttmA3BCU(v4ZYZP0D4i?`?aYTap=aU zzPwr;7(67p(cn#MXQ!nWNkYH}w-EYk!Uw@s{yy+wGxFi@64D3R2xTH?WonU*^r73A z-y+FNv3`Pl=nN|%eR#=^UR>@fItV_L_kPQYz|BJ`)Q@M-fx$Dv2fjbe%{Du=h(8W| zXb+*kC45+ORk9y^C_+BGYaxBm`78I>RzuH^BjOP4$A2QpPqFSsKD-EPA$|CpEdd|Y z$cIk6_dn!tj`QQa(1F2w!UstJEva`s3Nf6zR}R5fs2o;ra>2TgWQ?5x4L41Bf@#*s^X|D@X z?%Dip0<|w;-&H4*K1{`nMB_n#IzFclAt1R%ISdXQM-KE&Bn2d6l;WSjfqsMl@2af$ zDGXF(pERBh=gl+anilL8Cy@jFKPT-eW0m(C!2up&FS;tzy~1JSMu0%&_?-TPKk?3_2+RDwtMiQAPsK(B3fUL zYC%zobqx}z4(Aa9v@x7i5GVzKWPyVpX+PJW=QL}(M=)Wp?9X960{ePoe>q%8*egRg z8DQUXScn%Vi?;Za0;&GI!E?5I#1H~BCyr{7CdK+X5@-mIBm`(vIO{=x@*_A9?+{4Z zXZZ8H=WX{$CG3?h9M%)Czm4o~hD!*0x?V@LPKQKL9!bu6P0@Qbr`0Th;D4~=hsVi!Oax;&>3E`rB@@2keZZgEqdtc z3~|cWPHC*)hOtu|y9Q5Im^!&LC4hAxfjZVyoiZ(s2F+^NBJZ_H=ioZI{$y}wxtOK+ z70(^l`lF=W)qonp-7yv#(hH;OwbO>L{GP8Sro(d1TNLf1P{yz&glz0H|AqsuyHl*+ z5{>0gR%W@q0N*D=V-=HgLWsr&H%qEvI@sI5kR6A8O$=GxWO)AndLTg&L(J)}CRyZL z7{jq>K24^>dDEPhG*fq*HF?a#&wzb1mPj!ho3j%oM zMKERw0a^k_RSyC)kwDyd2_ZmJbJl=B7ZQ+uJ%e;0Er3@og7T7N^RvYeM@wSIEiR(O zK@cjZ=gc62Ah^Y^g&_E#AjEHmAZSH6jG$3#Idc~mN#daE;b)!p+h)plDb^{d_N4LZ zuY28$8rrgt+m9Ie$|6o9xDj6lZfL)r32q=Oku5yI;YONl8k8OI_)aR)fdjURxSW}% zWL2hO6n0ABnzH-3{Q62s(fv572TuI#iD{OU{0NWx8ALx^58ywE+U`;FH4NGEz8IWG z^-$Q-KrIO9kbrnbrUzlarkQf-$HXETX@55$^2Nqt4+FAq=$lB|uMMmLd&yO>-!o%9 zP6}w7wZDRZffVQrkiG?hZY0pzS5FF52X=t~yFre{iXhdgxqCuU3`r;=}t)xgd+uRrK ztp;|AbuH?Pn(+e7I=@W=(zpuAY3yPA@Z#iP%_=)0EQUbIaR|!|Py5DP z({Un}6^Z6Vu-Onk89Ua04Q=WUj~ETKO4Q#KpOa$!^p|#nWSH7#xcEXT)v>A90V3=x>@u4v7NbMf6A&aw)N|qB9$wap28~Xy|e~NpOn?cLwUtbbjS`G z{HhO~uewEjZ9GM3_CU=D=qt{QrVNpt3p)#n)fF+Q9RIoE(iPFqiM2M7HCL>AO~DS{ z>Dy~cXlZ}{c&h_L`b*oSw4YXhM$Cv;_I+xseWE1b6}~fO1mkS2eH&QnAIVRv<)A1X zv&TCJP8#5(xXtEs)fJJ|gtm=~R8iJHe%D`m-Td~fw`R@{YU6I51FyFo8_&_*8BBN! zmNCvbvwzB+Uq*l9n;saaD66fZFKG^fLNTzbGxp?>xm6 zxbLqe;_S%$7R4DAJH)<1#`(Q7PkFb_{#dO_k80D(|6wqXt}dew_lwcn4RbGz9yy%X zDGk%Eg}54geH~1T8W^v9xpr+82YZYHhF{T<4UVbQpAC2w7{&D?deXziuwzAf8Qljd zj&Rr2dg*Ix?Q3gAR-V=D^-SMsaU2FTpk^*FI;eBjIdUOpwM*??X;{oUs959|02P~h zpj;}6ukw=DeK?llv!PS#ARzQQR=~8my^&lel9RQaC*^RLCm`2=sY~l=0zCMWvmKl!oacw`h1A(wrqZ2XiKfVx=v(uU|gnK zew~;278o6?cBkH|wJ(O#c{BcT-CF0RN2k{oIV*0lgRT$y+pdhAx+PG*&G}-z{XQt= zsV!I@=$J#pC`Fo6xmZcyJ3n?vfpe+e&~&P=Gdtw)j*u!tQ^hCn&$F!yLw#SX3cFSF zW|hJ9vv+(4Z-%47?iY3%O&+EgmdjC0Zv6$j-xRRy89Qfk^C`JAH)PJ^z%`=om03GZ zpbKe#FyO-3Tbghc=Goi1JQNk4bqX%MOl#F-sW2yvem9DA8F2Qz!nbJ zj}BTVILeIcVYEK@byW5QaykCICiF{eF?K<7_)E-tPV-O+u|_ses53b9_xmY7O zRx5MZzpk7OKvy0-Tqo*&F>%m+%FBNyzUOE-CtA8T>?55DXV0A_bYlyLbgk)^2nm?E<<`5#R|I{#xi#s^RXY_Q8`L zu&f&+!~Jgr;_s;~#Oh|G-#cRmy{;J$(_{49JQdTua?ZV*XQ6W@?N|lM>#`D>KaN7v zQ}>6v;Ci<6E4Y4U*>6zuAEyeS=CcoScf*BqtuN4pGzH!ZuAF^|t~~e5?J=xw94l=5 zSFoOkWH^5utmcrU9qh~PXl_9ehAMvwhAs+eZ@xHO^3hm64%bJH;D*#SWtKos&vX8k zdLM79Shg8_cz)6qU3rjl5U%WQ{SLgVNYkPVXTBZ^7oLCjJzTgqE2)b6`q(Fd?9KD$ zt@?S5P&*{;J6@{}6BI8UPK}^NhtgFsPz~=0-LhFqk zCTE&%{s8x)!X2e>J}d1E($f5>#)jKoI0Wa<&pL+cNWZE1O^uCpSsiSOcGz&!og+l? 
zMm1mQ z))SI+nhn$K?eER6&a3$a;o;K?9?am$dW?}<#|n2Ir~HN5zL6Fk*cffDz1@9G_8S<` z&VIPgC(}gZh zJqAH+Ye>><7Mp79AGq278f{itWRfYI9r@8M@W#o)*(Y=#odVOYQ@{ASpq`)H>Rsi7 zv|j$gi0|o7#t!Tz%-F)n$Z36yy$(~LEkmbCyofnCb=Qn%eAI$z(1LdNlTHnq<-KWr zw$v7Jth#IN+w9c1_JzYNO|Ou+^IE>?X7b|br%>5AH`j?%j9roOS&tbG;mG|wCOo9Q z>4Nc3bCC0vE=}KrgEp~7FJrElo7=4#5IA^SVK-~un-_KoL`R#SL?IbciylH9G%f2* z%{H5uk1B6>zc^B@(HCM4_=Kq+#XsPwdmfLEPHm=-pN2-&>zV-D)Sk7pyRXM@VRaCy z-3`XwoEuo{hk;?N7N%#r`;bi%G|rkfGkZQVS7kkdP`l%nPmgI?yVuHkjGNd;%>#Qr z0&lzt=sDs}7P_qem+4<8wv|FajZ-hn?>f2_6VHR{H#K#M(5g=@ALGz?Pj8_y9>nfd zNXB#Irlpr#=G^=AqtSKa6xt)y4Eq{$)aE`a(z%k6_w- zB^IbC!R$G<{dm=Ux@b7lhRx^E6-7#`FiJ&6sg@-Z$NVC}>?oB2ChTo>-;-djbjmx1 z4a=3%5)?0_>z**+y!4JlKZUF0QVu$M#mhwe+{$3}f^;OJT2?Ay$-d#>{roq6#mD&U z@I;qjXD>kx#!3rB_Lq=7_ZD*Qof9}mnTO13mw*{%HK!jNQytlD*%qY98is7fd-!(fiX{}TBW%0S}|ak7IAokHma=T{1;qCsH;M0G2Z1D+AiNCF6ars9%d?6f?uE7TXc;cB3=F1VWVGp0^UDO3mVUU z?pHOl)?P{;-{?o51;?;_;E#?rnG) z)LwyVPp2@6+QbE%6sc1geK`@inEBO?+6!J{7u0rVmXv6_aPfI)d(H%CdyU?OZ2PFd zL;A(F_6>xde3RW$9q6qTLgX{}wdTPk!@l2)UzCPP214Xxn3-hc-?mVgKf>!&jN(mcK3(X%$G+*tQ$UNo~rDa0%<;+kx zZ)$$c_H{TkzY8^go$f>Pw{zA?oy+K1MDx`t)dLOC{6VPs16k{1v})%m;dmDA=DbPVq0 ze8M_FbTuX23u-S%weM35C2Kzr=>IzqKD~&ZIuH^36dgmPH}_J2s43FdXXQgw{Olnr zhZ-O%ru8=L2M#`UeXV^t;XC$&-4^39^mD1A0aQ)>{CA2wtha4~ey$p(3njXRyX58a zYZtx5UWDt_+p{DueWJKFQ2U>~sBqp?``gw{Q2SU^`%gj}qIUUVj!QNU!<499e%NkK zHadpdYZO8oqV~beA+S7J47G<1Zy{?hT@uFYy6B}JPSpOy$(8XAMlGs+ij4x!n`+-T zdmGfg5Y=9;CTqvaISWO-%W(Q>qB)c_y3=)7-SdXX)4f@zh{zi<7DMDqKZVFYp^kva zqnI?joE1`Q|H9n4E8l(ByNrGo_4pZnSE!Z1t?$yQkzbm*Et$OT5+GnAPw0As&3Wj0 zJmgqV0`GAqWwf6g}h#`M|L1(iwMYJHC>Bh7K9FqSuii2es8 zGP;5gnbW!rM8=p!nll%|7DaE+xNloIdgm_b6RDC*Z{m!cHVnqhnCnU+8tT=@AsTI; zK{RTr#y}Me9Qsnk@O#Ku>>DNOO)PW!y<#ju$DruiyK+peeNOeydVO5OkeQ)7Bv#Az z_C5A#?6n~?(k%_+)4#RGgxmS_i-MXW=i2#`6^S%A zK0~+j45s&zk8S&wKPxS=_c-fZj!NVtJKY_=vE89u1e-n$1NYj))={rF&c;LTpzqLO zuJc#8HS930Z^+X|j^8k3ln;M}?pl0(*f^)KjFA~f5lx~B~%T6ZF>UF8FcleC* zzeRI2X%Dc0zJ?i38D9uoTWp*TQI~JF2~H~)yY7Ip-Apy>%h@@_9+VC8UX!|+RfAHj zLzA@a(g4l52be3~IDUkogxDzNv0GBgU8Ts5XZ;n47uBarG^@6G2!N_HZ4^Iuzx@Sl zzkQYjyL%C#{0*Vh5>fI3(>|ciz)+rYgkAk1u&W>4KG?jguU1ZXPO*+jGViiVZa}-X z@=EYv))wxhcB-wjdAqz*&rgS)-m@qGTVpm5`|Yy#jBQg=tmBdh3hBLSTLL-;52l93 z5WFAFhy4~>K-mQ?kZdR-Tkx#+(QS!EIuBxt_SuM2jJWt;4Yiq1+w87NP!topliX8#i#=2j(oJiuu_2eGt^?9oamZvr(h%4a z++OUVMe)A5AapzFTQEC*Aw(DXR${&51I0E;D9kP0=21?D*MV&tx-`W)4K;3YXz7NG zrIC5CZVw@0>@+|~%=Cn<@)kI{7}c5)>Irc+b`GsSJLx<5wgkm4VuM+#7p3eh-saJQ z+^w+|d!|^gO)AoQ!XA4f?pc%XdHES~Khu-_opWkRin+rmv>5gp@V5*3+qGdg8Ot=G z@K9nA#fyk#nlRWq369a0y=TsD!PWeg<*vNl!s4`sZ9C1I>=bs44}Ul8@~($nURSSb zH^Mi(Td(WNO;8*qPIT)H-4N)byb^<|lv`~cFFI{s(+#;m-`JdD;Ya4!o}ji|bGJRAICafQ zaaJ~ep>+qyIkZz18}BpLI=UtmC3<(dm8?DGyv;+q5e8MU&8x5AukGE3il-gzIm*NQ z^u3CTYr{au)!WeG9=y~%LL>u~{eiDZ5S}9 zf2DXWzAHg-jS%SmSyn7rSR_FLZvJ#QZ*m~@62AZ(2tW=r3P=H&i}L<=Ucub!CaCY9SB^YZefuD`LH*D-dw`x5O;Qw7i}DjC($P`o+R#p#CU=s!U&YW zq`O75qvoP4RT)_9p)PkAV%xZ|*rNm- zHr>T=_Crw?+z4)?A$g0N`XKzi1a5+XP7s3Dh5|#IYW^Pu!I{KO+nW{#p3O#{rO|>& z&$h5*lfIwGFAZa;5WuB1OcvYf#B;UYg_!-(D$>naUu=8TU=Yoq8n| z$5vPP^UALOVtqlk}HzML_(%^I>M$WIM0K)wc&6@TwHNjgepRq#$rsW|f zT3h%x*^S+RotqeO;@8Lsm^5n;Mh-qGLUR5le2BTK1MHl-75VT+NBY3`2bk7~h~vb} z4F#B%0tojrP|+kVu!BFnEgm&>&1H#FR(M{ofC#fw8 zp!qso0AEO)Jl0R(t^nz`NV+?$lV~38Ejty5_LZXcb@%3yT;5zip4xA_2MeHL<78Ae zvsH9pz(Wzn`96$7z{bk|$&B;12LOyq*wb#aS$Rf=9PGoxd4xU9j)SiU2PlYE7T7tE z4rKW8_5}frO9&|2*edHyKsF)=J_{ET0!jzY1+W(&d(q}Tr2Tq7-jR_&HWK!fI^76RFb1meOagn*LAxefMf zkiB$sf6~6ek5?&y@)CkCrHlobw)@}6I4=C(%($LyWWI{bJyO^CR8l!`)V3Qi=#$k8Z`$ZF)rMz`NoBq=8UMn#&;^rZun*cYmnngtv#C%sy%n3ROPyZs_LgM*7 
zGa&NE`?->{@cb4}dN|m0D^hw-R|5L3HFLI|Qv(dpv!f6}I@0c8$AXRX^PF?|#c3sF z-Mye4uTdbrISB30az!|oefqZjfnHRivAP?wb5Q6FDm3~O*;vCw<>yWv;MioMv3LEo zo=M-NSa+euJ_#=;Dc`xAXE0=25EfqJfUt1WCtscLg}}P)9xYJL^b2DYN7eBS;OMWA zz_aiMLV)%^nQ@-ez3m=dgn)7kd?IjCGvy>~-mc(809h`yEVBOd|%F=!UFw5~9Y@D2=7V zdDAE@Z5CaCxH}=JH}+E!GuKX6irJBii$v3jIL-{}kz> zBNA@b~@CaG@TxIP=4qr?NNK%wRAkdBk(yV5X4y@uhIe+5+i zAdP*|bV~;>%Q|yqqjquXB}J6kXsXZ1nRm+PLCbxHE9*Pw=t@$SxTD% zUPTTEud0)0lWQVePMhHZe9(a>SL@J=WDVTbgLFp|s2M0O%k3^C7xCwM+ro?KfF}Eu z>|qT)hea3n_8M@eSN75+Ek1_^*uV=j)Ngy#_VO>IHS#Z&hm;wo>XXaGoj(5X!rp(S zby|$NKvixt%7E#nXZ(zGi zy>m~qgZj85#`O1gZ?Wrl7rxl(G-FVk(^dO&L{T5J`5DSF8@+jFfMAX|Bcq?$spa-9 zy*+0S*Mq)JteoDp7|*~ia)S6iNCZ*kKW_&DyvsL6Z~aG7`4um51z4sIRNFY}fXBY* z&=IP;;w48X!zJ&YEW*osF;jqG*7hXD?>n>q`L5Rf$ra;DIL_0GyBYMF;RqcECtkaC zn&EGm-iEmZ1dbtruBh|mLi|H#zI%`n5;twh$X_$8FWHsR-y{3CZZD#IUXr_Q0WQ@* znG07#=XH(X1GdXUTbPa&OGFT6SHC0)j$ri#T}Q-IV8t33=Lv5&Qhbx2?6wV}uR-m0 z^YevO^Gkr~!o`aZu` zEJQrZj08AtWV=mUMhjeIfIRaNx~=vdN;1Yz(hcbo&)3->BA9Red&WNxyIZ~Vmr?&; zW&uFn;A=v_t%p6hhh5ovTp@=Bl=O#B#NMUYnRyTbdI(iqUTb2?fyI|G2D|WJ zUxh*<;DcRU`uo5!+8yw%ys|Y&iR4O=oae$1as!x0L9Q0bRp-}|{p01ryDN+ml4@Qq zE`xmF7}V_i6t%=YN#}COx+)HiJ%wRPtGiAT>D(%PDQtIqAjYeuA}MTmNRWWoFSPEu zQ_e1M=PZhQ5MsN<#?3;pfO!_&d5hdJMAwt<1iSR{_XI+I6wx2DD*2QZD}ti5ND!C% zh1SDpM0k5Adx2lFlN|P_YQ4k=2!A@TE5FpQj2;5G_OHEnvc~#Fbs?kSw^Rl=ErHYb zr)Lw4w@cxlxs*WIyob}LR*6`Ner>w!tO>C7$+<;;w8CX{ln9aF!ob{?1?Xdc84ZXh z&2$~+fW6jS%f%e1a;hd=-@nvt>ipe{UBb0ktr;+hK zYh=9R(u|DvX>=j&y^--YlEzKPe$o7{$9MyUX6?y4&V(Xk=d)1sWtx#XUYVugS7LpJ z54T;=19U@x>q`>~ZjFfLrqf3~d7BG6LbA4N5-YL3L%K|qwey0|uT1>tz3+NrXgT*D zX?9j(qnOU;P|XFJo0V7+^`xt2Vih(*)Pddj7%!WK-|u;7COUiTAFmL*zRdYg zaQClemk@tmegoCkl=(ThTQI8|F096P(AtR5DBRyoni5(4qCbK%$0Jnzab^~1oZTT% zX7G)OU+aZ)S{Ku}dodoNeU@b3@u%+nhtBND5#B$OXvENuEk}yaZ(da|0I79Pn*x6X zB75S!39F5bA^BU>=-M7V0i<~sb%{o-q4M#X-~YwPkIF9p7bBmx`^OJ4F@I(#M zclJ`_aG}U!jjSp7om7s>o)*KP$a*xS02J$I`OpB%Py32}*)(70IR}F68eH?A1U|1_ zGyVcL+$JzfxIcO~rDtEheCod1Ip(8b7Vx_} zbELW_#|(X$c^Y-(AN{t2Z2gWkAUkT=9R%$k{{rCj!>yUX)E~>@U6J7zryuAKS> zasH2s&cc;*JJQjW^eSZXZW%*!btAPe-qH~Y^Z3r7b0#AFBS<<)MZo{r*4K&AYyLK3 zB2;;{DR(D~+qv^hZ$jPH4}nWwQnooPSfl z%UZ8Z##{Ky1e=c_X+WMQh9p%3xc>auc!cpc8~OfpBj5ja**SEfT#hb0ibH(=k2?@H z|2C}*f&Ufv(S^TWT?Jl$nGDeV=7mlgT_M&FSHQu&{^l=b$oU4mdm%3oz7IIX0am|o z)+f|5(}xJ|A;_-z`1lphS5!!EMMEg@ijcETnGtfV2$8gd-B8UlWJyTI z8|p`Q*beuKW#6(ln1<=Hv*7(-$8LE8d*3`^MUSBV8In|quAeZn`41vCmPQ(HNZ{l( zbpEstV)h^PI|E}7AGBJtX+MoOqjyip9X`3Zhe}s$My3A>%$K6uT6R_gwGBCn66BDd zkf;iuyj|3DTCDoRJ;?M~Ky#1Tar}4p{{QhQft9=Qb~$;_VdLy{w{PrYM7e*!?h*s{ zquXv_@p0(NciAs3zX33R0`PL8&1{0Dw>HB4h21rGHKVRzp8aN@FwGOWv2B<{s*lu{THn7lBzSp`i|W-+Y$+n zkV7U`pUL}()xTuP>Yq4Aviep%Sbeu1PdZ@rhio&l`hmtj&3Qb;>f@XL&FUu%=>s3B zo%r1N=da~GG5C;-JlelMcw*s$?0XK7dm$MHDq`3zK2E4f_~;jlrhW^?!Z|&HS@5Q2 z6>1MYSbWX&JjnR|ziItUm5J8h^&hnUZ#^;iX<75u^?X>xK4K2Sdp;R%y!m=|k9*U} z!nXgQ^%?mjt-tID`cUP~(P=&=O1t4{B=W3oPX%o2q52T;`}K_&OdY@6I4o!oWJaE zBj;a@IDgS^h$rV0oIn17^B+@+E`@-QmHm_{{4`ObT3i~M=BzmVIvT+RC<9r?9<|(7ilMa*<}Ry zKSwHptH_nJM*cqtiPWT^=H5imy=*%Yi7*zPhYADQ4~17cdVQeWaWm$A()f8m5i+Ru zjhC{mFnVY_vMIeJ!7vu~8TvQx60(28gLVV80F@n@U&=QE{}kRuiCoMMrF18i>L_$K z-dZ1Uo#M@~`I5rRNnrQmV*Ju1@Ql(2{w?FaxJM0T$^M=N@u&O&aJ}JGKQEEOB?2Tw z2>b6w*ncO&{(nOx{}P1#mm!kB1$`ax3p7rc0mZry$=`uU{!SqIDSrUSPg#YEokGRh zpjhb=)Q00$P7HdbvF|Te`q$IOH-=BY*>{P}_C$P&eTT3YpUav7X1Wr@`CkPy^+GVy zmRUtn*r8&V2t~y$>XE&NqOJ({3T+2Zp(axHemb-G!spt6S*6fuE{v%!whiag`Np(r zPik{qDKin&|2<&%vT4A?QxMe8M^L}P6L9$!MC%(6t&evi4quAO1wuL5b|`0vhH@fA z^|Mg9ZiMS+qGExlm?M%yRKIpS6w@NAKNwN{YE&!~6?=~W{I@{s%d()D1VQ~21ofvP zsJ{e3{q+dy??A-@x}Y2-9m+5S?L%ozCP_HGg-+7yVw#MrNuixq9qesWE 
z_F&2D%QEHzgSizEsg}I{`M}wJz81Xx`3Q+WI))XBk9hrKF53YTU5toS3tk^P#c3purxzu1D;*H_pr6%F|}uP<)-69*R9 zfLLHlUfmcIwOHlJIczt;dXKfd_OY-{sjOu|pX#NLYpX)hAyz@VK{p$k`f#gp}(t_7tzS@G< zpVRg+um5?anb&{8KJqcIzpl}O*Y8?=8iL)0U|&mKKk$)f*lr76Utagl`o8DCd425H zcbDOF9B+vAN4)-%Hd)SP3ts<;%VrfihGA~W>#NsxK<&j*P`f3sFA4JB?`y&9OC|(= zhmK*GTk`tlb73`}{uo8Yg4Y+{0$%@n3tnG*%WjMDxCgIa_Q)G}{R-%3OI}}_c{t-J zBorZe{gPcjNDf0nk)FK1;cf-g{u0%0!RzZ=IWC7Rd3}AW-JC=J!Rw#R8v(rjIjG%| z*B7l0<278i;Po2@xH68zsO`z?tLB}D+W$edTk!g@Jl%uWSFiqWUSFE+<_F(tL__2) zc>P;*EqVPD2R`EU4MtwSoZ$8KZw}T%Iu~E)dP`nE|JFym{``ploKHn5_$aGVzpOQFV%a z(Kd)B7T)2HTjl^u@L^+F#1qn&cdSkt)`h&{_r{-tTc&-F(jCsKS}$xYdEDxpkB*UJ zV_46HQC$1}Oy(@{rBrJVW#6s&aWTGA#$TFkHMA_Cm|b)U^4drM@MoU85ta#m^Wg95 z-Y{z}L#E!Uv4$|ILa{Ul>#y8uN$n5C9gNt%iQ2#NU(|kznc8>ZQ7x$b9R#(X8nJP& zqnX+t<$$RDDpxbLAA$~n+HWPO{WTH)qV}oTJ2jjK*nqHL=R@LUM{Xr@9;E0_BG2MV1s-NX&;FFvnFEyJ}PP?_S64O?00F- zKET}Y&0b#3%X(Eq zDn}CyZSaZT16bx4fOY5xPXZX)$OMe!pMgzxVCGRFk!{VUVh_;<&9M^99$&{!f_jTl zy^+>mlVIFTAKvu|+dSe)l0A;CdifuizA}rw6zp}#-V)Pa;3H}S3RFti(-PQU{pFfs zJsXkm@u8_3k}8c`*@)U>hx!`8X1bihQ;VVN@+z_M>tUg=}7pkon4I#oY)tCP*D5GL~hAbu7aLPp;?-`J>L&)5( zI-g+HJ4EgS=2~K{A9DF~PXOs}BHmi6Kt#SV2{EmheG^)nis0KC>s@5D>U|=2GqM1p zB6`W-LVLNq5ddU_=DT^)cNKedZb(pk0}kfw(vJN`@c0{G*^9r0+>>dN-7n3%`V@AC zuNL{c33Z=mHtDbIqOLSIfm%v{lCq12au}MWAjjIx&E8vqqhJ(PN=}Qj+dT=akaa~? z2q4Hf0B~|kxg>2(8APiBfZ>ACS1ZVvOc|tE{t)Yn)5_SPz`F8Kie>)BpUBqz?ZXQM zN))-M~ZY{%CE zXF8k!Q6hbXqMAI7)gzb`oBGiHgTBu<)AtJ^d(ihWDieKggzxo)p~DD@lM4Rk@yRmki^!Q#XxOkQoEx{ellb#=u=@9C<$>`-OJ} z(pDxH8N7*>T=3;fQ(+hZ)W503M(_Pw%zmOb)E3E`D{9DS@Q2d|I%PsMMi@0{fOM^p6&?$SpSK$o^^b<*5g4_vj@0hjtPmPoPBqM)t2l z4TL@IF8e9i*C6|@jps@GhrYb+KHEJgh}cH_k4Sk6C{GX&AuP+FvJ4(41Z1;PRH1j;m$bl@65oN<&pW`yerlI@m8egt+E0lwJ<5d7MqaQjJ7 ze!%l#0PwfMH#WBkh4MOyZTJR6+rxk>=Ewsl^|8$aeC?o5adIi=4y%4&@T*B*&Yxg@ zI!yKF{{Xj_LujG~N=ZK`S1El}iADS%^Dw4ybbcDAA2Aoa^8n!eL2KO0HSH})2LRPt z0}*w|q+Ki6{)OoK{gd)OW9^+fRGqYg0w_GB3tqJpr3>b~V|}@5=_f-=$-&=pO^4Dc zqjboYxz~YXo$Ztc5ZxGJ0(JP+cu*l(ZYAouY&?uYBF;k2E9kmT#Kv+9|0KH((jlt> zS>!`G)6jXtR^@;|4HBph;}P~s3;tEm`#EXf?Z?{=*?NNsd)j+8iwPiQJ+j9yhY1M* zS^(!Wpb>)+Kr1SIK?;QW^Y%en+ZaMX$>pdvfWUPm&=3|$*eioLv0yJj_VK-6k@nI4 zydx_BP$ukY4jdMH7oeYDkGtOtlMn*5QDnyCWNG1Fq(GcMuYA>Z4;dk#goMePK%i&t zb)X!{gvn&?b-3L~AYHHl%9E+rDG@OC4L|^g;OOaPk$b(%0)Vy1y{?Ub!T%75Uo!Xl zVn`P(1Eli_3W7!Mb)`klWMl4iGt#-zpLf#$<;m3Rq6t7aci}KJh}`QXI^b6+@To@k zgx!$${9XBG?cWo2@1ZQHEgRQw>S9GTW^i+(pug zaXIgaxvjb(gLKGLlnpue9|{o8xQP*jbja9i$PmmW(M!k>ELvQ|M|scN$I;=u8NIy0 z2jo>@uGICSEr*$vP!~fA&ng9@DHvNtPWD_1XVJ;&py35Y8!(4w%e) z9vT4OH$jeH31W6tca$mO|3t zp1Iex%?6ZbnTLF^$h|I^4xegdF1E9$cBRi#hPr>@{;Ul@tc(C>5 zg7rg(b{!;!NM|zzs8;F#6d;#tWDm>YXr)fs;0yc^H~)7j0$9VShY(i}?aK&nAlgUU z%vlTV>qhM}Y#vB9FC&1bJ_<}Slr!DOW^+_II0&pq0&BuM2?6C6PBjSd5fIvKCnN>d z2k^F+!KWSs)J&VlVKEQGofO$yBwnW#aq7W764_hkUB3{(+jra|@49k1M}=F({^y$!M+&TTjpKA4LPV!!Htx#S7vf> z)g}<=nRs2fi}M5obV$H5@p?x9+{mE(ZDNFo=E9Av3q~p8@RKIe1BrP`X_Z`s5fXw% zNYLjW(Db=*AA1F(lt_DybDjpkjqFFbnW8ARLGE>9-t+ZD-gDhjWeni75eTR4STKoz ze5(UnjNH8OwxFD}lr|SZ_3(&RJ0wn;RA)SnaWT3Wi;x@({PN0rKtnO&Lr^Hu&+B8}` zc((v~_beP`Wj78j7y*oZ#}O`XzP-qjX>Q98Cn&}ugn5kJ3&LK6rz>ZIKpGP0-bV_+ z2*~}e-ZJNU3a#>JSpsz&av;R+Z$iM3PMZVvYmohWdMxR{4$ji3@1epN;(kdn(f;ct;a!Q+m&~i&so`G_#6Z0)IkT1Lg!xsr~ z`zD$NDD0_0{FCsL2ifn5jVA431Q^yXu}HbD3*?W2Zvujmy`Zn&WDmc_*|UQpK|pc` z1hDB-NdZBtgsa>P!<9&xuKbCk^5Mfxb~*||tliXPGjc7-q^*V^7*G)6zL-vok?@$N(!41kiG#IAO1vgAD_G_+5 zialiJ^z3T*Zymt$>^Dmag1$b1#DD&0*EG z%1Q&^$&a$OyT#FQxL|s+3GeYcV8(1lGiK(&4<8kz(*`%go9Qe9Uo@&=)E2`s%p; z7ZS?v2oeI6FD3dSTj>W`79cx0zNxG4{TY+)5fk>_l~eH1`|Qi(iN`_LJ^mx`Y8Ucq zxg%6#hUaI{f}|%AJa1mg(A_X_?k+2mAwoZNcFyu`9xBa7C4BFF_gK!Fs$T6AhpA4M 
zxZOQMUDxW=!HK#H3m-y&=yBRjWyYm?_&h@WQak(f3iximlfy}Ithbt@aOktZoxRWH z`YcF60kZ<`AGCF)C;2|+4Cg7%T@xuO&Qw=3ULPC*SuXgq{z0kDVPpPb*B};rZ2!wZ zVC=(|ZJ3Q1`$o!jtBjd_YV=kpd3l@fDH#sg%X!ueW6JYACu6;}29)yrTSVIay4PF0 zXJ^lp=ZxpuPkl&vu0>e<7H^3RVegy0olb8Ru83FG&EW*(LB{j(^>eI8=b18|pG1<6 zE4{_{zX1^Y3Cei>IH}|cE)Cm*GCntWKeg}ExMH^I>hhQJ)<3=EXMlJQJ<~-)e-U@kA-lm5OZ8=<@Ee?{gbqeUylOXP0;GnOgg5!15E{V5ig= zAoS8PVsCmyt$ht*^{Z_@@476)ttM1&T-5cM^|AvWQl5*4GrIsRu0biGYi-VwVDVBH zwlpuT4B&ZlE@|Bb%x?9s2(X8(37-+vvnb{H5l>=Kw!)<$c*Q@rI7L5 zV1nLpUJX_YDbKNU(9WE*tiLQ$o~u?g;IKse58%>a=Q>HMUwu(8SqFOrz~h?|Zg(sf zwcUmN5#$8QyG|@I?_9xG=pm(!0{xfCEP48D7yfB8PmjxuImHJtFFpMTs6Wip5tQ|4 zrho_Q>PMcQRSI#{gL()S*g5b$CA~>%c}Z za|3Gab)?fHm=_^skq+sX=Xd%3a(KQL`qYt;D7<4>ceX!Hbk?zzvRnv*HW!7#60Yy) z0oQl@H(Xz{vH{t58SNXQ>&P(v>cV!0t%(-GBk?FE9p)lE5QWhncfTHe7<>OK43j)m zb6YLq=uKZIj2zH_+Sp=jWAGL-r#d!<`SaiVB|_loiSJ6fTzF3oC}1fcrDdnrvqt(w zl>#@vQ|qU$GbTZA0D68b)cB`X=T`~%eK!OoZ=BQ(2l8Hz$orobxGS#0UJ;}>UpV8n z>stWwQ9|`m7o4e(GF=6yFW$slbLym30tS2?SjL_o7O1~52r{Chbn2e@1+IaF--`yQ zlfdudB-A&_^8?M_3#?2YpiL4+NzLUNzFwvNQjeQ*t%u)YrNEX__Z*)qvcUI!w(^Nx zE^IdQ2Q>N*qj4N|yJRBqZH=^1+{w`fMU{e_DSx2n0M$UdwPo}rD6os%OX`RTG>2J= zBz{5?wAN#!gsxF9!_{}a+&j}lR!#&-F?`H2+E(#ckkOAtG7Ul54*9tG9$LLhl&cr!>b>;$h(J`Yipj;q##0~_ zevg$+dLRfrj{3(5UWY?ISeQcd<`FL;fj}kc>J&O2{&W zk?A0zB_yy^W;HzzHY9&TLHU(+fRHfYcg3Hfhst=L2vP0agu0}Kmb-ghoQ z^ZqBl(Zqcp{u2XA$G7l7kqC;K=IXHl+=Y}cAt3ngC?Zc-Wz)n?#GKcwm?4fZSs-02 z%=zD2CJ}R9+p5Fb zbpaQz^_5NTrmxHuJbc+z@J&;2dI5h}NP!$S8z`@y!pS{9fAB%~pbYe`=JCXBH^iV$!b$2Sm_}eJt z|B-bjU`<@z|IY$RfFwo=5-~uiyQ0PoP+StEifa*3aV59_qGD7;1Y{Bd!rRv+wp4Ks zuC#8bsHmt3prTTZN)-h(xKc%oJ1Xk`oJnTV-}`@_@}$mWzV~y_J@?Mcy(i~BqifK( z9ByL<@OI{BsvkBkH?LyeAP-?%ed0Z%JWuzOIS=hHk96K4&uJBPvP`N%=e=(0hs+#q zh$}kpXLR1ryyuhzVScKJ2MuF9Y0DumX^q@uS-(W|UP?ppA8zIZ^xiM%yykuW!N*%AYg z$cSJHCC*#4SoB{o3J=rv@|=F8w)a)OP^6(v(vUE#LGW?$TfQR!jdv^51gYN)4(>Qu;G4!_Uch58jHJ^=+-UE!d2 z(QH}aY#617*$)-ZmcvmvgbyRbO8FFKW2zEm)TB(6PC%<1_2%;ifgu7YLr7mR*O!BY z3MIx5%Mef!>a;?TAcmPp4Re@~);Q_S7YqiCfs}^o1+%^iH1w#U85S(0G;-4zdjUhb z7i-ffYDG(wc=KIzKw>Z@VR*spofQYJj}k|Qv0B+A2ysDL7;30Ujb@Wwqu%@7Y}IQh zmNA?%NWFLSZ1sEc5XjUIE2U(_C24w8F_0>m>%}z5Q*XW`6i8-MB+4e{jjTD~`olq@ zH8DIxOx4DeW)CUQOIumCTP?{HweV}NI<;}*A!U9_*|trYzU^^yIF z^(k844(fv!O1Y=K_{SU;k-kjuI7{?3aPzf*Ik-25XSADPh^9L20-vq%&}nza<3Ofz z8$k^p#tJSwk8{IMBcbfEI!&HAPx_Z@#SQ3fhV`Nml*D46LX;>bC6KVGjN0fXKaPN^T;=R!D@ z99Hz1c^HNu&0!pZOe4}^2qLe%^HI~b(zV6bgo!JCwt}n=j-)4|$hscf_J+Ue$t z;F-gJ=7Fif5(k>YG5A z`Kxr)F^9s$#i~Mg%RGHY%RDuH!k=iOWuD4CGrq9@yLsxr_bFh)X-~hf#d#;?qc(LF zL`WKX=)z-1LG5;M2*^IG9JSAFD`uZfRxl&9FuB!xmWl^S0zQcTROrOJtm$Tt33hq@ z|B-AdeZu~hiv6XmS&Yg7bqv_sZBODl+)U69Ht56{HyuZRXriE~V4}dxf{8fx6wVP( z2RF&;(wPBj7szu#2;$mS#f!+8HrI^;oPz#6ylcS}L2kk9Q$ITR`KtSD4eA{Rdl{C` zHSbrzOke@S*o~FfF?5gSmoi01Ss;9B4 zW~^%0BwqkYNalv^ndlt9fDJKPlji9kiXtwD?x`nM?Ih1B_e^qjR@_m$jJ%`X-|9_& zGfZ5276dv>dYC;CK1GvC@fvxg)3iN5xb0*u7trx=8OegjthMbD!k*%0l$^;ad-PBH zi)L*w{|+;u#@|;@f`w~jtjikzBWxX9U{0Qun=^TH#~eJUCOW>U-UUW;Uncez^n zn_L3pZL{hOe%4t5Oue2%t0*?abfs1XioYwX??6hYDXgiUwn^pU?`rcM#&Gfw%*{`6 zjrl{z=vHx;UTV1(CaQx-9%&s+AGeLayumg{uOqZGTEXn}G*8=Xd-Z#DUdLiYE9hw4 z(9>K~@2(CelSS-uO{b2rF0QtK38ds;3e8ZuF*B-LSvDI2-V({uRjv^Bf!U;p zi%^1gx$o8vi)|*5F@B0(u7y>p<>)igeM_9FiY0`}IK={(Q=UhK&~UP}IW@46D3+12 zHFHU~ORKSrad<{*>dP6$+wwrWQS_lPmf%a)EG~6<(EiDQ#D0cyQ z2--5y^(G|xg6M^6ApN0gWL4I(Zdcb}F;!5^KdX)i%Kae{z`BN_93S}rnmG>`29K^W}m zZcowL&q-@1{{nTguB?~^ju>S3m^OwW>rs3BH?){}WDb;M4e6TcX2I*KX4t_w z9XjUYnbn;Y5JvGC4ei^;wfT=Z9d&&$@MqG(Vnt$7|1Pk`_n!k9p4(;!c4o{FQ2Bo4 z-dSyg&`~{vIDG96?nZUg(4X=rYc-RhCD&7R4E@PCnZ46`2pU&MxCT@G-R$ky7GzkS zC=Dnn7zXEm6de=}&LhX~)4oM^^Vcb2AmdN`l 
[GIT binary patch payload: base85-encoded data for a binary file changed in this commit — not human-readable]
zK8Pv6ZKolgo@C_9>l+EzFzAg*{D@g$kc}0*Ay$RKB+O(@oDPEl7KxAOW#;rWCNUu# zI$|Og;T8^An9iSw4u?#PF_%aVhhJgp7~)hoQXNO@i96vilB_xMhH#32iy5R)TOI-# MaU>n37?Vi<1KXK*>Hq)$ diff --git a/python/triton/language/libdevice.py b/python/triton/language/libdevice.py index 25e75c89d52a..f42705dbc981 100644 --- a/python/triton/language/libdevice.py +++ b/python/triton/language/libdevice.py @@ -1,1525 +1,1526 @@ import os +from .. import impl from . import core, extern LIBDEVICE_PATH = os.path.dirname( os.path.abspath(__file__)) + "/libdevice.10.bc" -@extern.extern +@impl.extern def clz(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.int32,): ("__nv_clz", core.int32), - (core.int64,): ("__nv_clzll", core.int32), + {(core.dtype("int32"),): ("__nv_clz", core.dtype("int32")), + (core.dtype("int64"),): ("__nv_clzll", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def popc(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.int32,): ("__nv_popc", core.int32), - (core.int64,): ("__nv_popcll", core.int32), + {(core.dtype("int32"),): ("__nv_popc", core.dtype("int32")), + (core.dtype("int64"),): ("__nv_popcll", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def byte_perm(arg0, arg1, arg2, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ], - {(core.int32, core.int32, core.int32,): ("__nv_byte_perm", core.int32), + {(core.dtype("int32"), core.dtype("int32"), core.dtype("int32"),): ("__nv_byte_perm", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def min(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.int32, core.int32,): ("__nv_min", core.int32), - (core.uint32, core.uint32,): ("__nv_umin", core.uint32), - (core.int64, core.int64,): ("__nv_llmin", core.int64), - (core.uint64, core.uint64,): ("__nv_ullmin", core.uint64), - (core.float32, core.float32,): ("__nv_fminf", core.float32), - (core.float64, core.float64,): ("__nv_fmin", core.float64), + {(core.dtype("int32"), core.dtype("int32"),): ("__nv_min", core.dtype("int32")), + (core.dtype("uint32"), core.dtype("uint32"),): ("__nv_umin", core.dtype("uint32")), + (core.dtype("int64"), core.dtype("int64"),): ("__nv_llmin", core.dtype("int64")), + (core.dtype("uint64"), core.dtype("uint64"),): ("__nv_ullmin", core.dtype("uint64")), + (core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fminf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fmin", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def max(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.int32, core.int32,): ("__nv_max", core.int32), - (core.uint32, core.uint32,): ("__nv_umax", core.uint32), - (core.int64, core.int64,): ("__nv_llmax", core.int64), - (core.uint64, core.uint64,): ("__nv_ullmax", core.uint64), - (core.float32, core.float32,): ("__nv_fmaxf", core.float32), - (core.float64, core.float64,): ("__nv_fmax", core.float64), + {(core.dtype("int32"), core.dtype("int32"),): ("__nv_max", core.dtype("int32")), + (core.dtype("uint32"), core.dtype("uint32"),): ("__nv_umax", core.dtype("uint32")), + (core.dtype("int64"), core.dtype("int64"),): ("__nv_llmax", core.dtype("int64")), + (core.dtype("uint64"), core.dtype("uint64"),): ("__nv_ullmax", core.dtype("uint64")), + (core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmaxf", core.dtype("fp32")), + (core.dtype("fp64"), 
core.dtype("fp64"),): ("__nv_fmax", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def mulhi(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.int32, core.int32,): ("__nv_mulhi", core.int32), - (core.uint32, core.uint32,): ("__nv_umulhi", core.uint32), - (core.int64, core.int64,): ("__nv_mul64hi", core.int64), - (core.uint64, core.uint64,): ("__nv_umul64hi", core.uint64), + {(core.dtype("int32"), core.dtype("int32"),): ("__nv_mulhi", core.dtype("int32")), + (core.dtype("uint32"), core.dtype("uint32"),): ("__nv_umulhi", core.dtype("uint32")), + (core.dtype("int64"), core.dtype("int64"),): ("__nv_mul64hi", core.dtype("int64")), + (core.dtype("uint64"), core.dtype("uint64"),): ("__nv_umul64hi", core.dtype("uint64")), }, _builder) -@extern.extern +@impl.extern def mul24(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.int32, core.int32,): ("__nv_mul24", core.int32), - (core.uint32, core.uint32,): ("__nv_umul24", core.uint32), + {(core.dtype("int32"), core.dtype("int32"),): ("__nv_mul24", core.dtype("int32")), + (core.dtype("uint32"), core.dtype("uint32"),): ("__nv_umul24", core.dtype("uint32")), }, _builder) -@extern.extern +@impl.extern def brev(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.int32,): ("__nv_brev", core.int32), - (core.int64,): ("__nv_brevll", core.int64), + {(core.dtype("int32"),): ("__nv_brev", core.dtype("int32")), + (core.dtype("int64"),): ("__nv_brevll", core.dtype("int64")), }, _builder) -@extern.extern +@impl.extern def sad(arg0, arg1, arg2, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ], - {(core.int32, core.int32, core.uint32,): ("__nv_sad", core.int32), - (core.uint32, core.uint32, core.uint32,): ("__nv_usad", core.uint32), + {(core.dtype("int32"), core.dtype("int32"), core.dtype("uint32"),): ("__nv_sad", core.dtype("int32")), + (core.dtype("uint32"), core.dtype("uint32"), core.dtype("uint32"),): ("__nv_usad", core.dtype("uint32")), }, _builder) -@extern.extern +@impl.extern def abs(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.int32,): ("__nv_abs", core.int32), - (core.int64,): ("__nv_llabs", core.int64), - (core.float32,): ("__nv_fabsf", core.float32), - (core.float64,): ("__nv_fabs", core.float64), + {(core.dtype("int32"),): ("__nv_abs", core.dtype("int32")), + (core.dtype("int64"),): ("__nv_llabs", core.dtype("int64")), + (core.dtype("fp32"),): ("__nv_fabsf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_fabs", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def floor(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_floorf", core.float32), - (core.float64,): ("__nv_floor", core.float64), + {(core.dtype("fp32"),): ("__nv_floorf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_floor", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def rcp64h(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_rcp64h", core.float64), + {(core.dtype("fp64"),): ("__nv_rcp64h", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def rsqrt(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_rsqrtf", core.float32), - (core.float64,): ("__nv_rsqrt", core.float64), + 
{(core.dtype("fp32"),): ("__nv_rsqrtf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_rsqrt", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def ceil(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_ceil", core.float64), - (core.float32,): ("__nv_ceilf", core.float32), + {(core.dtype("fp64"),): ("__nv_ceil", core.dtype("fp64")), + (core.dtype("fp32"),): ("__nv_ceilf", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def trunc(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_trunc", core.float64), - (core.float32,): ("__nv_truncf", core.float32), + {(core.dtype("fp64"),): ("__nv_trunc", core.dtype("fp64")), + (core.dtype("fp32"),): ("__nv_truncf", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def exp2(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_exp2f", core.float32), - (core.float64,): ("__nv_exp2", core.float64), + {(core.dtype("fp32"),): ("__nv_exp2f", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_exp2", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def saturatef(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_saturatef", core.float32), + {(core.dtype("fp32"),): ("__nv_saturatef", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def fma_rn(arg0, arg1, arg2, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ], - {(core.float32, core.float32, core.float32,): ("__nv_fmaf_rn", core.float32), - (core.float64, core.float64, core.float64,): ("__nv_fma_rn", core.float64), + {(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmaf_rn", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fma_rn", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def fma_rz(arg0, arg1, arg2, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ], - {(core.float32, core.float32, core.float32,): ("__nv_fmaf_rz", core.float32), - (core.float64, core.float64, core.float64,): ("__nv_fma_rz", core.float64), + {(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmaf_rz", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fma_rz", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def fma_rd(arg0, arg1, arg2, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ], - {(core.float32, core.float32, core.float32,): ("__nv_fmaf_rd", core.float32), - (core.float64, core.float64, core.float64,): ("__nv_fma_rd", core.float64), + {(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmaf_rd", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fma_rd", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def fma_ru(arg0, arg1, arg2, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ], - {(core.float32, core.float32, core.float32,): ("__nv_fmaf_ru", core.float32), - (core.float64, core.float64, core.float64,): ("__nv_fma_ru", core.float64), + {(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmaf_ru", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): 
("__nv_fma_ru", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def fast_dividef(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float32, core.float32,): ("__nv_fast_fdividef", core.float32), + {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fast_fdividef", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def div_rn(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float32, core.float32,): ("__nv_fdiv_rn", core.float32), - (core.float64, core.float64,): ("__nv_ddiv_rn", core.float64), + {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fdiv_rn", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_ddiv_rn", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def div_rz(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float32, core.float32,): ("__nv_fdiv_rz", core.float32), - (core.float64, core.float64,): ("__nv_ddiv_rz", core.float64), + {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fdiv_rz", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_ddiv_rz", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def div_rd(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float32, core.float32,): ("__nv_fdiv_rd", core.float32), - (core.float64, core.float64,): ("__nv_ddiv_rd", core.float64), + {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fdiv_rd", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_ddiv_rd", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def div_ru(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float32, core.float32,): ("__nv_fdiv_ru", core.float32), - (core.float64, core.float64,): ("__nv_ddiv_ru", core.float64), + {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fdiv_ru", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_ddiv_ru", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def rcp_rn(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_frcp_rn", core.float32), - (core.float64,): ("__nv_drcp_rn", core.float64), + {(core.dtype("fp32"),): ("__nv_frcp_rn", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_drcp_rn", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def rcp_rz(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_frcp_rz", core.float32), - (core.float64,): ("__nv_drcp_rz", core.float64), + {(core.dtype("fp32"),): ("__nv_frcp_rz", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_drcp_rz", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def rcp_rd(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_frcp_rd", core.float32), - (core.float64,): ("__nv_drcp_rd", core.float64), + {(core.dtype("fp32"),): ("__nv_frcp_rd", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_drcp_rd", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def rcp_ru(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_frcp_ru", core.float32), - (core.float64,): ("__nv_drcp_ru", core.float64), + {(core.dtype("fp32"),): 
("__nv_frcp_ru", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_drcp_ru", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def sqrt_rn(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_fsqrt_rn", core.float32), - (core.float64,): ("__nv_dsqrt_rn", core.float64), + {(core.dtype("fp32"),): ("__nv_fsqrt_rn", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_dsqrt_rn", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def sqrt_rz(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_fsqrt_rz", core.float32), - (core.float64,): ("__nv_dsqrt_rz", core.float64), + {(core.dtype("fp32"),): ("__nv_fsqrt_rz", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_dsqrt_rz", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def sqrt_rd(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_fsqrt_rd", core.float32), - (core.float64,): ("__nv_dsqrt_rd", core.float64), + {(core.dtype("fp32"),): ("__nv_fsqrt_rd", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_dsqrt_rd", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def sqrt_ru(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_fsqrt_ru", core.float32), - (core.float64,): ("__nv_dsqrt_ru", core.float64), + {(core.dtype("fp32"),): ("__nv_fsqrt_ru", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_dsqrt_ru", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def sqrt(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_sqrtf", core.float32), - (core.float64,): ("__nv_sqrt", core.float64), + {(core.dtype("fp32"),): ("__nv_sqrtf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_sqrt", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def add_rn(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float64, core.float64,): ("__nv_dadd_rn", core.float64), - (core.float32, core.float32,): ("__nv_fadd_rn", core.float32), + {(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dadd_rn", core.dtype("fp64")), + (core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fadd_rn", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def add_rz(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float64, core.float64,): ("__nv_dadd_rz", core.float64), - (core.float32, core.float32,): ("__nv_fadd_rz", core.float32), + {(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dadd_rz", core.dtype("fp64")), + (core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fadd_rz", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def add_rd(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float64, core.float64,): ("__nv_dadd_rd", core.float64), - (core.float32, core.float32,): ("__nv_fadd_rd", core.float32), + {(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dadd_rd", core.dtype("fp64")), + (core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fadd_rd", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def add_ru(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float64, core.float64,): ("__nv_dadd_ru", core.float64), - (core.float32, 
core.float32,): ("__nv_fadd_ru", core.float32), + {(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dadd_ru", core.dtype("fp64")), + (core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fadd_ru", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def mul_rn(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float64, core.float64,): ("__nv_dmul_rn", core.float64), - (core.float32, core.float32,): ("__nv_fmul_rn", core.float32), + {(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dmul_rn", core.dtype("fp64")), + (core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmul_rn", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def mul_rz(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float64, core.float64,): ("__nv_dmul_rz", core.float64), - (core.float32, core.float32,): ("__nv_fmul_rz", core.float32), + {(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dmul_rz", core.dtype("fp64")), + (core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmul_rz", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def mul_rd(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float64, core.float64,): ("__nv_dmul_rd", core.float64), - (core.float32, core.float32,): ("__nv_fmul_rd", core.float32), + {(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dmul_rd", core.dtype("fp64")), + (core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmul_rd", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def mul_ru(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float64, core.float64,): ("__nv_dmul_ru", core.float64), - (core.float32, core.float32,): ("__nv_fmul_ru", core.float32), + {(core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dmul_ru", core.dtype("fp64")), + (core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmul_ru", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def double2float_rn(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_double2float_rn", core.float32), + {(core.dtype("fp64"),): ("__nv_double2float_rn", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def double2float_rz(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_double2float_rz", core.float32), + {(core.dtype("fp64"),): ("__nv_double2float_rz", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def double2float_rd(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_double2float_rd", core.float32), + {(core.dtype("fp64"),): ("__nv_double2float_rd", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def double2float_ru(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_double2float_ru", core.float32), + {(core.dtype("fp64"),): ("__nv_double2float_ru", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def double2int_rn(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_double2int_rn", core.int32), + {(core.dtype("fp64"),): ("__nv_double2int_rn", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def double2int_rz(arg0, _builder=None): return extern.elementwise("libdevice", 
LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_double2int_rz", core.int32), + {(core.dtype("fp64"),): ("__nv_double2int_rz", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def double2int_rd(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_double2int_rd", core.int32), + {(core.dtype("fp64"),): ("__nv_double2int_rd", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def double2int_ru(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_double2int_ru", core.int32), + {(core.dtype("fp64"),): ("__nv_double2int_ru", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def double2uint_rn(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_double2uint_rn", core.int32), + {(core.dtype("fp64"),): ("__nv_double2uint_rn", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def double2uint_rz(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_double2uint_rz", core.int32), + {(core.dtype("fp64"),): ("__nv_double2uint_rz", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def double2uint_rd(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_double2uint_rd", core.int32), + {(core.dtype("fp64"),): ("__nv_double2uint_rd", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def double2uint_ru(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_double2uint_ru", core.int32), + {(core.dtype("fp64"),): ("__nv_double2uint_ru", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def int2double_rn(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.int32,): ("__nv_int2double_rn", core.float64), + {(core.dtype("int32"),): ("__nv_int2double_rn", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def uint2double_rn(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.uint32,): ("__nv_uint2double_rn", core.float64), + {(core.dtype("uint32"),): ("__nv_uint2double_rn", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def float2int_rn(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_float2int_rn", core.int32), + {(core.dtype("fp32"),): ("__nv_float2int_rn", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def float2int_rz(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_float2int_rz", core.int32), + {(core.dtype("fp32"),): ("__nv_float2int_rz", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def float2int_rd(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_float2int_rd", core.int32), + {(core.dtype("fp32"),): ("__nv_float2int_rd", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def float2int_ru(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_float2int_ru", core.int32), + {(core.dtype("fp32"),): ("__nv_float2int_ru", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def float2uint_rn(arg0, _builder=None): return 
extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_float2uint_rn", core.int32), + {(core.dtype("fp32"),): ("__nv_float2uint_rn", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def float2uint_rz(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_float2uint_rz", core.int32), + {(core.dtype("fp32"),): ("__nv_float2uint_rz", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def float2uint_rd(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_float2uint_rd", core.int32), + {(core.dtype("fp32"),): ("__nv_float2uint_rd", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def float2uint_ru(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_float2uint_ru", core.int32), + {(core.dtype("fp32"),): ("__nv_float2uint_ru", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def int2float_rn(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.int32,): ("__nv_int2float_rn", core.float32), + {(core.dtype("int32"),): ("__nv_int2float_rn", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def int2float_rz(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.int32,): ("__nv_int2float_rz", core.float32), + {(core.dtype("int32"),): ("__nv_int2float_rz", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def int2float_rd(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.int32,): ("__nv_int2float_rd", core.float32), + {(core.dtype("int32"),): ("__nv_int2float_rd", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def int2float_ru(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.int32,): ("__nv_int2float_ru", core.float32), + {(core.dtype("int32"),): ("__nv_int2float_ru", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def uint2float_rn(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.uint32,): ("__nv_uint2float_rn", core.float32), + {(core.dtype("uint32"),): ("__nv_uint2float_rn", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def uint2float_rz(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.uint32,): ("__nv_uint2float_rz", core.float32), + {(core.dtype("uint32"),): ("__nv_uint2float_rz", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def uint2float_rd(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.uint32,): ("__nv_uint2float_rd", core.float32), + {(core.dtype("uint32"),): ("__nv_uint2float_rd", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def uint2float_ru(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.uint32,): ("__nv_uint2float_ru", core.float32), + {(core.dtype("uint32"),): ("__nv_uint2float_ru", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def hiloint2double(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.int32, core.int32,): ("__nv_hiloint2double", core.float64), + {(core.dtype("int32"), core.dtype("int32"),): ("__nv_hiloint2double", core.dtype("fp64")), }, _builder) 
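# Note (illustrative sketch, not part of the patch): every wrapper in this file
# hands extern.elementwise a table that maps a tuple of argument dtypes to a
# (libdevice symbol, return dtype) pair; hiloint2double above, for example, maps
# (int32, int32) -> ("__nv_hiloint2double", fp64). The helper below is a
# hypothetical, dependency-free illustration of that lookup using plain dtype
# name strings; the real resolution is performed inside
# triton.language.extern.elementwise, not by this code.
def resolve_overload(table, arg_dtype_names):
    """Pick (libdevice_symbol, return_dtype_name) for the given argument dtypes."""
    key = tuple(arg_dtype_names)
    if key not in table:
        raise ValueError(f"no libdevice overload for argument dtypes {key}")
    return table[key]

# Same shape as the hiloint2double table above, with dtype names as plain strings.
_HILOINT2DOUBLE_TABLE = {("int32", "int32"): ("__nv_hiloint2double", "fp64")}
assert resolve_overload(_HILOINT2DOUBLE_TABLE, ["int32", "int32"]) == ("__nv_hiloint2double", "fp64")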
-@extern.extern +@impl.extern def double2loint(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_double2loint", core.int32), + {(core.dtype("fp64"),): ("__nv_double2loint", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def double2hiint(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_double2hiint", core.int32), + {(core.dtype("fp64"),): ("__nv_double2hiint", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def float2ll_rn(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_float2ll_rn", core.int64), + {(core.dtype("fp32"),): ("__nv_float2ll_rn", core.dtype("int64")), }, _builder) -@extern.extern +@impl.extern def float2ll_rz(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_float2ll_rz", core.int64), + {(core.dtype("fp32"),): ("__nv_float2ll_rz", core.dtype("int64")), }, _builder) -@extern.extern +@impl.extern def float2ll_rd(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_float2ll_rd", core.int64), + {(core.dtype("fp32"),): ("__nv_float2ll_rd", core.dtype("int64")), }, _builder) -@extern.extern +@impl.extern def float2ll_ru(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_float2ll_ru", core.int64), + {(core.dtype("fp32"),): ("__nv_float2ll_ru", core.dtype("int64")), }, _builder) -@extern.extern +@impl.extern def float2ull_rn(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_float2ull_rn", core.int64), + {(core.dtype("fp32"),): ("__nv_float2ull_rn", core.dtype("int64")), }, _builder) -@extern.extern +@impl.extern def float2ull_rz(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_float2ull_rz", core.int64), + {(core.dtype("fp32"),): ("__nv_float2ull_rz", core.dtype("int64")), }, _builder) -@extern.extern +@impl.extern def float2ull_rd(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_float2ull_rd", core.int64), + {(core.dtype("fp32"),): ("__nv_float2ull_rd", core.dtype("int64")), }, _builder) -@extern.extern +@impl.extern def float2ull_ru(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_float2ull_ru", core.int64), + {(core.dtype("fp32"),): ("__nv_float2ull_ru", core.dtype("int64")), }, _builder) -@extern.extern +@impl.extern def double2ll_rn(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_double2ll_rn", core.int64), + {(core.dtype("fp64"),): ("__nv_double2ll_rn", core.dtype("int64")), }, _builder) -@extern.extern +@impl.extern def double2ll_rz(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_double2ll_rz", core.int64), + {(core.dtype("fp64"),): ("__nv_double2ll_rz", core.dtype("int64")), }, _builder) -@extern.extern +@impl.extern def double2ll_rd(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_double2ll_rd", core.int64), + {(core.dtype("fp64"),): ("__nv_double2ll_rd", core.dtype("int64")), }, _builder) -@extern.extern +@impl.extern 
def double2ll_ru(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_double2ll_ru", core.int64), + {(core.dtype("fp64"),): ("__nv_double2ll_ru", core.dtype("int64")), }, _builder) -@extern.extern +@impl.extern def double2ull_rn(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_double2ull_rn", core.int64), + {(core.dtype("fp64"),): ("__nv_double2ull_rn", core.dtype("int64")), }, _builder) -@extern.extern +@impl.extern def double2ull_rz(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_double2ull_rz", core.int64), + {(core.dtype("fp64"),): ("__nv_double2ull_rz", core.dtype("int64")), }, _builder) -@extern.extern +@impl.extern def double2ull_rd(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_double2ull_rd", core.int64), + {(core.dtype("fp64"),): ("__nv_double2ull_rd", core.dtype("int64")), }, _builder) -@extern.extern +@impl.extern def double2ull_ru(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_double2ull_ru", core.int64), + {(core.dtype("fp64"),): ("__nv_double2ull_ru", core.dtype("int64")), }, _builder) -@extern.extern +@impl.extern def ll2float_rn(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.int64,): ("__nv_ll2float_rn", core.float32), + {(core.dtype("int64"),): ("__nv_ll2float_rn", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def ll2float_rz(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.int64,): ("__nv_ll2float_rz", core.float32), + {(core.dtype("int64"),): ("__nv_ll2float_rz", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def ll2float_rd(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.int64,): ("__nv_ll2float_rd", core.float32), + {(core.dtype("int64"),): ("__nv_ll2float_rd", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def ll2float_ru(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.int64,): ("__nv_ll2float_ru", core.float32), + {(core.dtype("int64"),): ("__nv_ll2float_ru", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def ull2float_rn(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.uint64,): ("__nv_ull2float_rn", core.float32), + {(core.dtype("uint64"),): ("__nv_ull2float_rn", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def ull2float_rz(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.uint64,): ("__nv_ull2float_rz", core.float32), + {(core.dtype("uint64"),): ("__nv_ull2float_rz", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def ull2float_rd(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.uint64,): ("__nv_ull2float_rd", core.float32), + {(core.dtype("uint64"),): ("__nv_ull2float_rd", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def ull2float_ru(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.uint64,): ("__nv_ull2float_ru", core.float32), + {(core.dtype("uint64"),): ("__nv_ull2float_ru", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def 
ll2double_rn(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.int64,): ("__nv_ll2double_rn", core.float64), + {(core.dtype("int64"),): ("__nv_ll2double_rn", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def ll2double_rz(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.int64,): ("__nv_ll2double_rz", core.float64), + {(core.dtype("int64"),): ("__nv_ll2double_rz", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def ll2double_rd(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.int64,): ("__nv_ll2double_rd", core.float64), + {(core.dtype("int64"),): ("__nv_ll2double_rd", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def ll2double_ru(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.int64,): ("__nv_ll2double_ru", core.float64), + {(core.dtype("int64"),): ("__nv_ll2double_ru", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def ull2double_rn(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.uint64,): ("__nv_ull2double_rn", core.float64), + {(core.dtype("uint64"),): ("__nv_ull2double_rn", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def ull2double_rz(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.uint64,): ("__nv_ull2double_rz", core.float64), + {(core.dtype("uint64"),): ("__nv_ull2double_rz", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def ull2double_rd(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.uint64,): ("__nv_ull2double_rd", core.float64), + {(core.dtype("uint64"),): ("__nv_ull2double_rd", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def ull2double_ru(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.uint64,): ("__nv_ull2double_ru", core.float64), + {(core.dtype("uint64"),): ("__nv_ull2double_ru", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def int_as_float(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.int32,): ("__nv_int_as_float", core.float32), + {(core.dtype("int32"),): ("__nv_int_as_float", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def float_as_int(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_float_as_int", core.int32), + {(core.dtype("fp32"),): ("__nv_float_as_int", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def uint_as_float(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.uint32,): ("__nv_uint_as_float", core.float32), + {(core.dtype("uint32"),): ("__nv_uint_as_float", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def float_as_uint(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_float_as_uint", core.int32), + {(core.dtype("fp32"),): ("__nv_float_as_uint", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def longlong_as_double(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.int64,): ("__nv_longlong_as_double", core.float64), + {(core.dtype("int64"),): ("__nv_longlong_as_double", core.dtype("fp64")), }, _builder) 
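# Usage sketch (illustrative, not part of the patch): the wrappers above are
# called from inside a @triton.jit kernel, and the decorator swap from
# @extern.extern to @impl.extern does not change the call site. The overload is
# picked from the argument dtypes, so int32 inputs route popc to __nv_popc (see
# its table above). This is a hedged example assuming a CUDA device and this
# module exposed as triton.language.libdevice; popc_kernel and popcount are
# illustrative names, not identifiers from this patch.
import torch
import triton
import triton.language as tl
from triton.language import libdevice


@triton.jit
def popc_kernel(x_ptr, y_ptr, n_elements, BLOCK_SIZE: tl.constexpr):
    offsets = tl.program_id(0) * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offsets < n_elements
    x = tl.load(x_ptr + offsets, mask=mask)
    y = libdevice.popc(x)  # int32 inputs dispatch to __nv_popc
    tl.store(y_ptr + offsets, y, mask=mask)


def popcount(x: torch.Tensor) -> torch.Tensor:
    # x is expected to be an int32 CUDA tensor so the int32 overload is selected.
    y = torch.empty_like(x)
    n = x.numel()
    grid = (triton.cdiv(n, 1024),)
    popc_kernel[grid](x, y, n, BLOCK_SIZE=1024)
    return y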
-@extern.extern +@impl.extern def double_as_longlong(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_double_as_longlong", core.int64), + {(core.dtype("fp64"),): ("__nv_double_as_longlong", core.dtype("int64")), }, _builder) -@extern.extern +@impl.extern def fast_sinf(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_fast_sinf", core.float32), + {(core.dtype("fp32"),): ("__nv_fast_sinf", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def fast_cosf(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_fast_cosf", core.float32), + {(core.dtype("fp32"),): ("__nv_fast_cosf", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def fast_log2f(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_fast_log2f", core.float32), + {(core.dtype("fp32"),): ("__nv_fast_log2f", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def fast_logf(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_fast_logf", core.float32), + {(core.dtype("fp32"),): ("__nv_fast_logf", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def fast_expf(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_fast_expf", core.float32), + {(core.dtype("fp32"),): ("__nv_fast_expf", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def fast_tanf(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_fast_tanf", core.float32), + {(core.dtype("fp32"),): ("__nv_fast_tanf", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def fast_exp10f(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_fast_exp10f", core.float32), + {(core.dtype("fp32"),): ("__nv_fast_exp10f", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def fast_log10f(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_fast_log10f", core.float32), + {(core.dtype("fp32"),): ("__nv_fast_log10f", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def fast_powf(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float32, core.float32,): ("__nv_fast_powf", core.float32), + {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fast_powf", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def hadd(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.int32, core.int32,): ("__nv_hadd", core.int32), - (core.uint32, core.uint32,): ("__nv_uhadd", core.uint32), + {(core.dtype("int32"), core.dtype("int32"),): ("__nv_hadd", core.dtype("int32")), + (core.dtype("uint32"), core.dtype("uint32"),): ("__nv_uhadd", core.dtype("uint32")), }, _builder) -@extern.extern +@impl.extern def rhadd(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.int32, core.int32,): ("__nv_rhadd", core.int32), - (core.uint32, core.uint32,): ("__nv_urhadd", core.uint32), + {(core.dtype("int32"), core.dtype("int32"),): ("__nv_rhadd", core.dtype("int32")), + (core.dtype("uint32"), 
core.dtype("uint32"),): ("__nv_urhadd", core.dtype("uint32")), }, _builder) -@extern.extern +@impl.extern def sub_rn(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float32, core.float32,): ("__nv_fsub_rn", core.float32), - (core.float64, core.float64,): ("__nv_dsub_rn", core.float64), + {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fsub_rn", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dsub_rn", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def sub_rz(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float32, core.float32,): ("__nv_fsub_rz", core.float32), - (core.float64, core.float64,): ("__nv_dsub_rz", core.float64), + {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fsub_rz", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dsub_rz", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def sub_rd(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float32, core.float32,): ("__nv_fsub_rd", core.float32), - (core.float64, core.float64,): ("__nv_dsub_rd", core.float64), + {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fsub_rd", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dsub_rd", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def sub_ru(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float32, core.float32,): ("__nv_fsub_ru", core.float32), - (core.float64, core.float64,): ("__nv_dsub_ru", core.float64), + {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fsub_ru", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_dsub_ru", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def rsqrt_rn(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_frsqrt_rn", core.float32), + {(core.dtype("fp32"),): ("__nv_frsqrt_rn", core.dtype("fp32")), }, _builder) -@extern.extern +@impl.extern def ffs(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.int32,): ("__nv_ffs", core.int32), - (core.int64,): ("__nv_ffsll", core.int32), + {(core.dtype("int32"),): ("__nv_ffs", core.dtype("int32")), + (core.dtype("int64"),): ("__nv_ffsll", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def rint(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_rintf", core.float32), - (core.float64,): ("__nv_rint", core.float64), + {(core.dtype("fp32"),): ("__nv_rintf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_rint", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def llrint(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_llrintf", core.int64), - (core.float64,): ("__nv_llrint", core.int64), + {(core.dtype("fp32"),): ("__nv_llrintf", core.dtype("int64")), + (core.dtype("fp64"),): ("__nv_llrint", core.dtype("int64")), }, _builder) -@extern.extern +@impl.extern def nearbyint(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_nearbyintf", core.float32), - (core.float64,): ("__nv_nearbyint", core.float64), + {(core.dtype("fp32"),): ("__nv_nearbyintf", core.dtype("fp32")), + (core.dtype("fp64"),): 
("__nv_nearbyint", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def isnan(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_isnanf", core.int32), - (core.float64,): ("__nv_isnand", core.int32), + {(core.dtype("fp32"),): ("__nv_isnanf", core.dtype("int32")), + (core.dtype("fp64"),): ("__nv_isnand", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def signbit(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_signbitf", core.int32), - (core.float64,): ("__nv_signbitd", core.int32), + {(core.dtype("fp32"),): ("__nv_signbitf", core.dtype("int32")), + (core.dtype("fp64"),): ("__nv_signbitd", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def copysign(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float32, core.float32,): ("__nv_copysignf", core.float32), - (core.float64, core.float64,): ("__nv_copysign", core.float64), + {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_copysignf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_copysign", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def finitef(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_finitef", core.int32), + {(core.dtype("fp32"),): ("__nv_finitef", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def isinf(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_isinff", core.int32), - (core.float64,): ("__nv_isinfd", core.int32), + {(core.dtype("fp32"),): ("__nv_isinff", core.dtype("int32")), + (core.dtype("fp64"),): ("__nv_isinfd", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def nextafter(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float32, core.float32,): ("__nv_nextafterf", core.float32), - (core.float64, core.float64,): ("__nv_nextafter", core.float64), + {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_nextafterf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_nextafter", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def sin(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_sinf", core.float32), - (core.float64,): ("__nv_sin", core.float64), + {(core.dtype("fp32"),): ("__nv_sinf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_sin", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def cos(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_cosf", core.float32), - (core.float64,): ("__nv_cos", core.float64), + {(core.dtype("fp32"),): ("__nv_cosf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_cos", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def sinpi(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_sinpif", core.float32), - (core.float64,): ("__nv_sinpi", core.float64), + {(core.dtype("fp32"),): ("__nv_sinpif", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_sinpi", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def cospi(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): 
("__nv_cospif", core.float32), - (core.float64,): ("__nv_cospi", core.float64), + {(core.dtype("fp32"),): ("__nv_cospif", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_cospi", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def tan(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_tanf", core.float32), - (core.float64,): ("__nv_tan", core.float64), + {(core.dtype("fp32"),): ("__nv_tanf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_tan", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def log2(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_log2f", core.float32), - (core.float64,): ("__nv_log2", core.float64), + {(core.dtype("fp32"),): ("__nv_log2f", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_log2", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def exp(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_expf", core.float32), - (core.float64,): ("__nv_exp", core.float64), + {(core.dtype("fp32"),): ("__nv_expf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_exp", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def exp10(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_exp10f", core.float32), - (core.float64,): ("__nv_exp10", core.float64), + {(core.dtype("fp32"),): ("__nv_exp10f", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_exp10", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def cosh(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_coshf", core.float32), - (core.float64,): ("__nv_cosh", core.float64), + {(core.dtype("fp32"),): ("__nv_coshf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_cosh", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def sinh(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_sinhf", core.float32), - (core.float64,): ("__nv_sinh", core.float64), + {(core.dtype("fp32"),): ("__nv_sinhf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_sinh", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def tanh(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_tanhf", core.float32), - (core.float64,): ("__nv_tanh", core.float64), + {(core.dtype("fp32"),): ("__nv_tanhf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_tanh", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def atan2(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float32, core.float32,): ("__nv_atan2f", core.float32), - (core.float64, core.float64,): ("__nv_atan2", core.float64), + {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_atan2f", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_atan2", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def atan(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_atanf", core.float32), - (core.float64,): ("__nv_atan", core.float64), + {(core.dtype("fp32"),): ("__nv_atanf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_atan", core.dtype("fp64")), }, _builder) -@extern.extern 
+@impl.extern def asin(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_asinf", core.float32), - (core.float64,): ("__nv_asin", core.float64), + {(core.dtype("fp32"),): ("__nv_asinf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_asin", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def acos(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_acosf", core.float32), - (core.float64,): ("__nv_acos", core.float64), + {(core.dtype("fp32"),): ("__nv_acosf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_acos", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def log(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_logf", core.float32), - (core.float64,): ("__nv_log", core.float64), + {(core.dtype("fp32"),): ("__nv_logf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_log", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def log10(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_log10f", core.float32), - (core.float64,): ("__nv_log10", core.float64), + {(core.dtype("fp32"),): ("__nv_log10f", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_log10", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def log1p(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_log1pf", core.float32), - (core.float64,): ("__nv_log1p", core.float64), + {(core.dtype("fp32"),): ("__nv_log1pf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_log1p", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def acosh(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_acoshf", core.float32), - (core.float64,): ("__nv_acosh", core.float64), + {(core.dtype("fp32"),): ("__nv_acoshf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_acosh", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def asinh(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_asinhf", core.float32), - (core.float64,): ("__nv_asinh", core.float64), + {(core.dtype("fp32"),): ("__nv_asinhf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_asinh", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def atanh(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_atanhf", core.float32), - (core.float64,): ("__nv_atanh", core.float64), + {(core.dtype("fp32"),): ("__nv_atanhf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_atanh", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def expm1(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_expm1f", core.float32), - (core.float64,): ("__nv_expm1", core.float64), + {(core.dtype("fp32"),): ("__nv_expm1f", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_expm1", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def hypot(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float32, core.float32,): ("__nv_hypotf", core.float32), - (core.float64, core.float64,): ("__nv_hypot", core.float64), + {(core.dtype("fp32"), core.dtype("fp32"),): 
("__nv_hypotf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_hypot", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def rhypot(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float32, core.float32,): ("__nv_rhypotf", core.float32), - (core.float64, core.float64,): ("__nv_rhypot", core.float64), + {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_rhypotf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_rhypot", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def norm3d(arg0, arg1, arg2, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ], - {(core.float32, core.float32, core.float32,): ("__nv_norm3df", core.float32), - (core.float64, core.float64, core.float64,): ("__nv_norm3d", core.float64), + {(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_norm3df", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_norm3d", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def rnorm3d(arg0, arg1, arg2, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ], - {(core.float32, core.float32, core.float32,): ("__nv_rnorm3df", core.float32), - (core.float64, core.float64, core.float64,): ("__nv_rnorm3d", core.float64), + {(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_rnorm3df", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_rnorm3d", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def norm4d(arg0, arg1, arg2, arg3, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, arg3, ], - {(core.float32, core.float32, core.float32, core.float32,): ("__nv_norm4df", core.float32), - (core.float64, core.float64, core.float64, core.float64,): ("__nv_norm4d", core.float64), + {(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_norm4df", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_norm4d", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def rnorm4d(arg0, arg1, arg2, arg3, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, arg3, ], - {(core.float32, core.float32, core.float32, core.float32,): ("__nv_rnorm4df", core.float32), - (core.float64, core.float64, core.float64, core.float64,): ("__nv_rnorm4d", core.float64), + {(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_rnorm4df", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_rnorm4d", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def cbrt(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_cbrtf", core.float32), - (core.float64,): ("__nv_cbrt", core.float64), + {(core.dtype("fp32"),): ("__nv_cbrtf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_cbrt", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def rcbrt(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_rcbrtf", core.float32), - (core.float64,): ("__nv_rcbrt", core.float64), + {(core.dtype("fp32"),): ("__nv_rcbrtf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_rcbrt", 
core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def j0(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_j0f", core.float32), - (core.float64,): ("__nv_j0", core.float64), + {(core.dtype("fp32"),): ("__nv_j0f", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_j0", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def j1(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_j1f", core.float32), - (core.float64,): ("__nv_j1", core.float64), + {(core.dtype("fp32"),): ("__nv_j1f", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_j1", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def y0(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_y0f", core.float32), - (core.float64,): ("__nv_y0", core.float64), + {(core.dtype("fp32"),): ("__nv_y0f", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_y0", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def y1(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_y1f", core.float32), - (core.float64,): ("__nv_y1", core.float64), + {(core.dtype("fp32"),): ("__nv_y1f", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_y1", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def yn(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.int32, core.float32,): ("__nv_ynf", core.float32), - (core.int32, core.float64,): ("__nv_yn", core.float64), + {(core.dtype("int32"), core.dtype("fp32"),): ("__nv_ynf", core.dtype("fp32")), + (core.dtype("int32"), core.dtype("fp64"),): ("__nv_yn", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def jn(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.int32, core.float32,): ("__nv_jnf", core.float32), - (core.int32, core.float64,): ("__nv_jn", core.float64), + {(core.dtype("int32"), core.dtype("fp32"),): ("__nv_jnf", core.dtype("fp32")), + (core.dtype("int32"), core.dtype("fp64"),): ("__nv_jn", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def cyl_bessel_i0(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_cyl_bessel_i0f", core.float32), - (core.float64,): ("__nv_cyl_bessel_i0", core.float64), + {(core.dtype("fp32"),): ("__nv_cyl_bessel_i0f", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_cyl_bessel_i0", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def cyl_bessel_i1(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_cyl_bessel_i1f", core.float32), - (core.float64,): ("__nv_cyl_bessel_i1", core.float64), + {(core.dtype("fp32"),): ("__nv_cyl_bessel_i1f", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_cyl_bessel_i1", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def erf(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_erff", core.float32), - (core.float64,): ("__nv_erf", core.float64), + {(core.dtype("fp32"),): ("__nv_erff", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_erf", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def erfinv(arg0, _builder=None): return extern.elementwise("libdevice", 
LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_erfinvf", core.float32), - (core.float64,): ("__nv_erfinv", core.float64), + {(core.dtype("fp32"),): ("__nv_erfinvf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_erfinv", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def erfc(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_erfcf", core.float32), - (core.float64,): ("__nv_erfc", core.float64), + {(core.dtype("fp32"),): ("__nv_erfcf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_erfc", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def erfcx(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_erfcxf", core.float32), - (core.float64,): ("__nv_erfcx", core.float64), + {(core.dtype("fp32"),): ("__nv_erfcxf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_erfcx", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def erfcinv(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_erfcinvf", core.float32), - (core.float64,): ("__nv_erfcinv", core.float64), + {(core.dtype("fp32"),): ("__nv_erfcinvf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_erfcinv", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def normcdfinv(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_normcdfinvf", core.float32), - (core.float64,): ("__nv_normcdfinv", core.float64), + {(core.dtype("fp32"),): ("__nv_normcdfinvf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_normcdfinv", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def normcdf(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_normcdff", core.float32), - (core.float64,): ("__nv_normcdf", core.float64), + {(core.dtype("fp32"),): ("__nv_normcdff", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_normcdf", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def lgamma(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_lgammaf", core.float32), - (core.float64,): ("__nv_lgamma", core.float64), + {(core.dtype("fp32"),): ("__nv_lgammaf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_lgamma", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def ldexp(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float32, core.int32,): ("__nv_ldexpf", core.float32), - (core.float64, core.int32,): ("__nv_ldexp", core.float64), + {(core.dtype("fp32"), core.dtype("int32"),): ("__nv_ldexpf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("int32"),): ("__nv_ldexp", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def scalbn(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float32, core.int32,): ("__nv_scalbnf", core.float32), - (core.float64, core.int32,): ("__nv_scalbn", core.float64), + {(core.dtype("fp32"), core.dtype("int32"),): ("__nv_scalbnf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("int32"),): ("__nv_scalbn", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def fmod(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float32, 
core.float32,): ("__nv_fmodf", core.float32), - (core.float64, core.float64,): ("__nv_fmod", core.float64), + {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmodf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fmod", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def remainder(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float32, core.float32,): ("__nv_remainderf", core.float32), - (core.float64, core.float64,): ("__nv_remainder", core.float64), + {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_remainderf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_remainder", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def fma(arg0, arg1, arg2, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, arg2, ], - {(core.float32, core.float32, core.float32,): ("__nv_fmaf", core.float32), - (core.float64, core.float64, core.float64,): ("__nv_fma", core.float64), + {(core.dtype("fp32"), core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fmaf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fma", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def pow(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float32, core.int32,): ("__nv_powif", core.float32), - (core.float64, core.int32,): ("__nv_powi", core.float64), - (core.float32, core.float32,): ("__nv_powf", core.float32), - (core.float64, core.float64,): ("__nv_pow", core.float64), + {(core.dtype("fp32"), core.dtype("int32"),): ("__nv_powif", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("int32"),): ("__nv_powi", core.dtype("fp64")), + (core.dtype("fp32"), core.dtype("fp32"),): ("__nv_powf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_pow", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def tgamma(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_tgammaf", core.float32), - (core.float64,): ("__nv_tgamma", core.float64), + {(core.dtype("fp32"),): ("__nv_tgammaf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_tgamma", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def round(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_roundf", core.float32), - (core.float64,): ("__nv_round", core.float64), + {(core.dtype("fp32"),): ("__nv_roundf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_round", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def llround(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_llroundf", core.int64), - (core.float64,): ("__nv_llround", core.int64), + {(core.dtype("fp32"),): ("__nv_llroundf", core.dtype("int64")), + (core.dtype("fp64"),): ("__nv_llround", core.dtype("int64")), }, _builder) -@extern.extern +@impl.extern def fdim(arg0, arg1, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, arg1, ], - {(core.float32, core.float32,): ("__nv_fdimf", core.float32), - (core.float64, core.float64,): ("__nv_fdim", core.float64), + {(core.dtype("fp32"), core.dtype("fp32"),): ("__nv_fdimf", core.dtype("fp32")), + (core.dtype("fp64"), core.dtype("fp64"),): ("__nv_fdim", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern 
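The `pow` wrapper above illustrates why the tables are keyed by the full tuple of argument dtypes rather than a single dtype: `(fp32, int32)` resolves to `__nv_powif` while `(fp32, fp32)` resolves to `__nv_powf`. The following is a rough, self-contained sketch of that lookup using the same table shape; it illustrates how the tables are read and is not the actual `extern.elementwise` implementation.

from triton.language import core

# Same shape as the tables in this file: argument-dtype tuple -> (symbol, return dtype)
POW_TABLE = {
    (core.dtype("fp32"), core.dtype("int32")): ("__nv_powif", core.dtype("fp32")),
    (core.dtype("fp64"), core.dtype("int32")): ("__nv_powi", core.dtype("fp64")),
    (core.dtype("fp32"), core.dtype("fp32")): ("__nv_powf", core.dtype("fp32")),
    (core.dtype("fp64"), core.dtype("fp64")): ("__nv_pow", core.dtype("fp64")),
}

def resolve(arg_dtypes):
    """Pick the libdevice symbol and return dtype for the given argument dtypes."""
    key = tuple(arg_dtypes)
    if key not in POW_TABLE:
        raise ValueError(f"unsupported argument types: {key}")
    return POW_TABLE[key]

symbol, ret_dtype = resolve([core.dtype("fp32"), core.dtype("fp32")])
assert symbol == "__nv_powf" and ret_dtype == core.dtype("fp32")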
def ilogb(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_ilogbf", core.int32), - (core.float64,): ("__nv_ilogb", core.int32), + {(core.dtype("fp32"),): ("__nv_ilogbf", core.dtype("int32")), + (core.dtype("fp64"),): ("__nv_ilogb", core.dtype("int32")), }, _builder) -@extern.extern +@impl.extern def logb(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float32,): ("__nv_logbf", core.float32), - (core.float64,): ("__nv_logb", core.float64), + {(core.dtype("fp32"),): ("__nv_logbf", core.dtype("fp32")), + (core.dtype("fp64"),): ("__nv_logb", core.dtype("fp64")), }, _builder) -@extern.extern +@impl.extern def isfinited(arg0, _builder=None): return extern.elementwise("libdevice", LIBDEVICE_PATH, [arg0, ], - {(core.float64,): ("__nv_isfinited", core.int32), + {(core.dtype("fp64"),): ("__nv_isfinited", core.dtype("int32")), }, _builder) diff --git a/python/triton/language/random.py b/python/triton/language/random.py index 67de92c43dfd..32183ec9b2d0 100644 --- a/python/triton/language/random.py +++ b/python/triton/language/random.py @@ -1,10 +1,10 @@ import triton from . import core as tl -PHILOX_KEY_A: tl.constexpr = -1640531527 # 0x9E3779B9 -PHILOX_KEY_B: tl.constexpr = -1150833019 # 0xBB67AE85 -PHILOX_ROUND_A: tl.constexpr = -766435501 # 0xD2511F53 -PHILOX_ROUND_B: tl.constexpr = -845247145 # 0xCD9E8D57 +PHILOX_KEY_A: tl.constexpr = 0x9E3779B9 +PHILOX_KEY_B: tl.constexpr = 0xBB67AE85 +PHILOX_ROUND_A: tl.constexpr = 0xD2511F53 +PHILOX_ROUND_B: tl.constexpr = 0xCD9E8D57 N_ROUNDS_DEFAULT = 10 # Default number of rounds for philox # ------------------- diff --git a/python/triton/language/semantic.py b/python/triton/language/semantic.py index 62e4a30bdcc1..f819b171039f 100644 --- a/python/triton/language/semantic.py +++ b/python/triton/language/semantic.py @@ -7,12 +7,12 @@ # Create custom exception that prints message "hello" -class IncompatibleTypeErrorimpl(Exception): +class IncompatibleTypeErrorImpl(Exception): def __init__(self, type_a, type_b): self.type_a = type_a self.type_b = type_b self.message = "invalid operands of type " + self.type_a.__repr__() + " and " + self.type_b.__repr__() - super(IncompatibleTypeErrorimpl, self).__init__(self.message) + super(IncompatibleTypeErrorImpl, self).__init__(self.message) # ===----------------------------------------------------------------------===## @@ -88,13 +88,13 @@ def computation_type_impl(a_ty: tl.dtype, b_ty: tl.dtype, div_or_mod: bool) -> t def check_ptr_type_impl(type_a: tl.dtype, type_b: tl.dtype, allow_ptr_a: bool) -> None: if type_a.is_ptr(): if not allow_ptr_a: - raise IncompatibleTypeErrorimpl(type_a, type_b) + raise IncompatibleTypeErrorImpl(type_a, type_b) # T* + U* with T != U if type_b.is_ptr() and (type_a != type_b): - raise IncompatibleTypeErrorimpl(type_a, type_b) + raise IncompatibleTypeErrorImpl(type_a, type_b) # T* + float if type_b.is_floating(): - raise IncompatibleTypeErrorimpl(type_a, type_b) + raise IncompatibleTypeErrorImpl(type_a, type_b) def binary_op_type_checking_impl(lhs: tl.tensor, @@ -129,7 +129,7 @@ def add(input: tl.tensor, if other_scalar_ty.is_ptr() and not input_scalar_ty.is_ptr(): input, other = other, input if input_scalar_ty.is_ptr(): - return tl.tensor(builder.create_gep(input.handle, [other.handle]), input.type) + return tl.tensor(builder.create_addptr(input.handle, other.handle), input.type) # float + float elif input_scalar_ty.is_floating(): return tl.tensor(builder.create_fadd(input.handle, 
other.handle), input.type) @@ -146,7 +146,7 @@ def sub(input: tl.tensor, scalar_ty = input.type.scalar # ptr - offset if scalar_ty.is_ptr(): - return tl.tensor(builder.create_gep(input.handle, [minus(other, builder).handle]), + return tl.tensor(builder.create_addptr(input.handle, minus(other, builder).handle), input.type) # float - float if scalar_ty.is_floating(): @@ -223,10 +223,9 @@ def fdiv(input: tl.tensor, input_scalar_ty = input.type.scalar other_scalar_ty = other.type.scalar if not input_scalar_ty.is_floating() or not other_scalar_ty.is_floating(): - raise ValueError("both operands of fdiv must have floating poscalar type") + raise ValueError("both operands of fdiv must have floating scalar type") input, other = binary_op_type_checking_impl(input, other, builder, False, False, False, True) ret = builder.create_fdiv(input.handle, other.handle) - ret.set_fdiv_ieee_rounding(ieee_rounding) return tl.tensor(ret, input.type) @@ -238,7 +237,11 @@ def mod(input: tl.tensor, other_scalar_ty = other.type.scalar # float % float if scalar_ty.is_floating(): - return tl.tensor(builder.create_frem(input.handle, other.handle), input.type) + # input - input.div(other, rounding_mode="floor") * other + ret = sub(input, mul(floor(fdiv(input, other, False, builder), builder), + other, builder), + builder) + return ret # % int elif scalar_ty.is_int(): if scalar_ty.int_signedness != other_scalar_ty.int_signedness: @@ -263,7 +266,7 @@ def bitwise_op_type_checking_impl(input: tl.tensor, input_sca_ty = input.type.scalar other_sca_ty = other.type.scalar if not input_sca_ty.is_int() or not other_sca_ty.is_int(): - raise IncompatibleTypeErrorimpl(input_sca_ty, other_sca_ty) + raise IncompatibleTypeErrorImpl(input_sca_ty, other_sca_ty) ret_sca_ty = integer_promote_impl(input_sca_ty, other_sca_ty) if ret_sca_ty != input_sca_ty: input = cast(input, ret_sca_ty, builder) @@ -293,6 +296,22 @@ def xor_(input: tl.tensor, return tl.tensor(builder.create_xor(input.handle, other.handle), input.type) +def logical_and(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + if not input.type.is_int1(): + input = bitcast(input, tl.dtype("int1"), builder) + if not other.type.is_int1(): + other = bitcast(other, tl.dtype("int1"), builder) + return and_(input, other, builder) + + +def logical_or(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: + if not input.type.is_int1(): + input = bitcast(input, tl.dtype("int1"), builder) + if not other.type.is_int1(): + other = bitcast(other, tl.dtype("int1"), builder) + return or_(input, other, builder) + + def lshr(input: tl.tensor, other: tl.tensor, builder: ir.builder) -> tl.tensor: @@ -320,7 +339,7 @@ def minus(input: tl.tensor, input_sca_ty = input.type.scalar if input_sca_ty.is_ptr(): raise ValueError("wrong type argument to unary minus (" + input_sca_ty.__repr__() + ")") - _0 = tl.tensor(ir.constant.get_null_value(input_sca_ty.to_ir(builder)), input_sca_ty) + _0 = tl.tensor(builder.get_null_value(input_sca_ty.to_ir(builder)), input_sca_ty) return sub(_0, input, builder) @@ -329,7 +348,7 @@ def invert(input: tl.tensor, input_sca_ty = input.type.scalar if input_sca_ty.is_ptr() or input_sca_ty.is_floating(): raise ValueError("wrong type argument to unary invert (" + input_sca_ty.__repr__() + ")") - _1 = tl.tensor(ir.constant.get_all_ones_value(input_sca_ty.to_ir(builder)), input_sca_ty) + _1 = tl.tensor(builder.get_all_ones_value(input_sca_ty.to_ir(builder)), input_sca_ty) return xor_(input, _1, builder) @@ -449,11 +468,11 @@ def arange(start: int, 
end: int, builder: ir.builder) -> tl.tensor: shape = [end - start] ret_ty = tl.block_type(tl.int32, shape) - return tl.tensor(builder.get_range(start, end), ret_ty) + return tl.tensor(builder.create_make_range(start, end), ret_ty) def zeros(shape: List[int], dtype: tl.dtype, builder: ir.builder) -> tl.tensor: - _0 = ir.constant.get_null_value(dtype.to_ir(builder)) + _0 = builder.get_null_value(dtype.to_ir(builder)) ret_ty = tl.block_type(dtype, shape) return tl.tensor(builder.create_splat(_0, shape), ret_ty) @@ -462,24 +481,40 @@ def zeros(shape: List[int], dtype: tl.dtype, builder: ir.builder) -> tl.tensor: # ===----------------------------------------------------------------------===// -def reshape(input: tl.tensor, - dst_shape: List[int], - builder: ir.builder) -> tl.tensor: +def view(input: tl.tensor, + dst_shape: List[int], + builder: ir.builder) -> tl.tensor: + # TODO: disable when TritonToTritonGPU handles views properly + + # assert len(input.shape) == len(dst_shape) numel = 1 for s in dst_shape: numel *= s if input.type.numel != numel: - raise ValueError("cannot reshape block of different shape") + raise ValueError("cannot view block of different shape") ret_ty = tl.block_type(input.type.scalar, dst_shape) - return tl.tensor(builder.create_reshape(input.handle, dst_shape), ret_ty) + return tl.tensor(builder.create_view(input.handle, dst_shape), ret_ty) -def cat(lhs: tl.tensor, rhs: tl.tensor, builder: ir.builder) -> tl.tensor: - assert lhs.type.is_block() and rhs.type.is_block() - assert lhs.type.shape[1:] == rhs.type.shape[1:] - ret_shape = [lhs.type.shape[0] + rhs.type.shape[0]] - ret_ty = tl.block_type(lhs.type.scalar, ret_shape) - return tl.tensor(builder.create_cat(lhs.handle, rhs.handle), ret_ty) +def expand_dims(input: tl.tensor, axis: int, builder: ir.builder) -> tl.tensor: + dst_shape = [s for s in input.type.shape] + dst_shape.insert(axis, 1) + ret_ty = tl.block_type(input.type.scalar, dst_shape) + return tl.tensor(builder.create_expand_dims(input.handle, axis), ret_ty) + + +def cat(lhs: tl.tensor, rhs: tl.tensor, can_reorder: bool, builder: ir.builder) -> tl.tensor: + assert can_reorder, "current implementation of `cat` always may reorder elements" + assert len(lhs.shape) == 1 + ret_type = tl.block_type(lhs.type.scalar, [lhs.shape[0] + rhs.shape[0]]) + return tl.tensor(builder.create_cat(lhs.handle, rhs.handle), ret_type) + + +def trans(input: tl.tensor, builder: ir.builder) -> tl.tensor: + if len(input.shape) != 2: + raise ValueError("Only 2D tensors can be transposed") + ret_type = tl.block_type(input.type.scalar, [input.shape[1], input.shape[0]]) + return tl.tensor(builder.create_trans(input.handle), ret_type) def broadcast_impl_shape(input: tl.tensor, @@ -496,7 +531,7 @@ def broadcast_impl_shape(input: tl.tensor, for i in range(len(src_shape)): if shape[i] != src_shape[i] and src_shape[i] != 1: raise ValueError(f"Cannot broadcast, the expanded size of the tensor ({shape[i]})" - f" must match the existing size ({src_shape[1]}) at non-singleton dimension" + f" must match the existing size ({src_shape[i]}) at non-singleton dimension" f" {i}: {src_shape}, {shape}") ret_ty = tl.block_type(input.type.scalar, shape) return tl.tensor(builder.create_broadcast(input.handle, shape), ret_ty) @@ -520,8 +555,21 @@ def broadcast_impl_value(lhs: tl.tensor, elif lhs_ty.is_block() and rhs_ty.is_block(): lhs_shape = lhs_ty.get_block_shapes() rhs_shape = rhs_ty.get_block_shapes() - if len(lhs_shape) != len(rhs_shape): - raise ValueError("Cannot make_shape_compatible: blocks must have 
the same rank") + + if len(lhs_shape) < len(rhs_shape): + # Add new axes to lhs + for dim in range(len(lhs_shape), len(rhs_shape)): + lhs = tl.tensor(builder.create_expand_dims(lhs.handle, dim), tl.block_type(lhs_ty.scalar, lhs_shape + [1])) + lhs_ty = lhs.type + lhs_shape = lhs_ty.get_block_shapes() + elif len(rhs_shape) < len(lhs_shape): + # Add new axes to rhs + for dim in range(len(rhs_shape), len(lhs_shape)): + rhs = tl.tensor(builder.create_expand_dims(rhs.handle, dim), tl.block_type(rhs_ty.scalar, rhs_shape + [1])) + rhs_ty = rhs.type + rhs_shape = rhs_ty.get_block_shapes() + assert len(rhs_shape) == len(lhs_shape) + ret_shape = [] for i in range(len(lhs_shape)): left = lhs_shape[i] @@ -544,31 +592,6 @@ def broadcast_impl_value(lhs: tl.tensor, # (scalar, scalar) => returns original blocks return lhs, rhs - -####### -# dequantize -####### - -def dequantize(input: tl.tensor, - scale: tl.tensor, - shift: tl.tensor, - nbit: int, - dst_ty: tl.dtype, - builder: ir.builder) -> tl.tensor: - input_ty = input.type - assert input_ty.is_block() - assert input_ty.element_ty.is_int32() or input_ty.element_ty.is_int16() - assert nbit in [2, 4, 8] - assert dst_ty == tl.float16 - - shape = input_ty.get_block_shapes() - factor = input_ty.element_ty.primitive_bitwidth // nbit - dst_shape = shape[:-1] + [factor * shape[-1]] - - dst_ty = tl.block_type(dst_ty, dst_shape) - return tl.tensor(builder.create_dequantize(input.handle, scale.handle, shift.handle, dst_ty.to_ir(builder)), dst_ty) - - ####### # cast ####### @@ -600,62 +623,78 @@ def cast(input: tl.tensor, dst_ty: tl.dtype, builder: ir.builder) -> tl.tensor: src_ty = input.type - if src_ty.is_block() and not dst_ty.is_block(): + if src_ty.is_block(): dst_ty = tl.block_type(dst_ty, input.type.get_block_shapes()) if src_ty == dst_ty: return input + src_sca_ty = src_ty.scalar dst_sca_ty = dst_ty.scalar - # fp8 <=> bf16/fp16 - if (src_sca_ty.is_bf16() or src_sca_ty.is_fp16()) and dst_sca_ty.is_fp8(): - return tl.tensor(builder.create_fp_trunc(input.handle, dst_ty.to_ir(builder)), - dst_ty) - if src_sca_ty.is_fp8() and (dst_sca_ty.is_bf16() or dst_sca_ty.is_fp16()): - return tl.tensor(builder.create_fp_ext(input.handle, dst_ty.to_ir(builder)), + + # Casting with customized floating types involved: fp8 <=> bf16, fp16, fp32, fp64 + if (src_sca_ty.is_customized_floating() and dst_sca_ty.is_floating()) or \ + (src_sca_ty.is_floating() and dst_sca_ty.is_customized_floating()): + return tl.tensor(builder.create_fp_to_fp(input.handle, dst_ty.to_ir(builder)), dst_ty) + # bf16 <=> (not fp32) - if (src_sca_ty.is_bf16() and not dst_sca_ty.is_fp32()) or \ - (dst_sca_ty.is_bf16() and not src_sca_ty.is_fp32()): + if (src_sca_ty.is_fp16() and not dst_sca_ty.is_fp32()) or \ + (src_sca_ty.is_bf16() and not dst_sca_ty.is_fp32()): return cast(cast(input, tl.float32, builder), dst_sca_ty, builder) - # FP Truncation + # Standard floating types' casting: truncation + # fp64 => fp32, fp16, bf16 + # fp32 => fp16, bf16 truncate_fp = src_sca_ty.is_floating() and \ dst_sca_ty.is_floating() and \ - src_sca_ty.fp_mantissa_width > dst_sca_ty.fp_mantissa_width + src_sca_ty.primitive_bitwidth > dst_sca_ty.primitive_bitwidth if truncate_fp: return tl.tensor(builder.create_fp_trunc(input.handle, dst_ty.to_ir(builder)), dst_ty) - # FP Extension + # Standard floating types' casting: extension + # fp32 => fp64 + # fp16 => fp32, fp64 + # bf16 => fp32, fp64 ext_fp = src_sca_ty.is_floating() and \ dst_sca_ty.is_floating() and \ - src_sca_ty.fp_mantissa_width < dst_sca_ty.fp_mantissa_width + 
src_sca_ty.primitive_bitwidth < dst_sca_ty.primitive_bitwidth if ext_fp: return tl.tensor(builder.create_fp_ext(input.handle, dst_ty.to_ir(builder)), dst_ty) - # Int cast + # Casting between integer types if src_sca_ty.is_int() and dst_sca_ty.is_int() and \ (src_sca_ty.int_bitwidth != dst_sca_ty.int_bitwidth or src_sca_ty.int_signedness != dst_sca_ty.int_signedness): sign_extend = src_sca_ty.is_int_signed() and not src_sca_ty.is_bool() - return tl.tensor(builder.create_int_cast(input.handle, - dst_ty.to_ir(builder), sign_extend), - dst_ty) - - # Float to Int - if src_sca_ty.is_floating() and dst_sca_ty.is_int(): - # TODO: is this correct? if dst_sca_ty.is_bool(): - return not_equal(input, tl._to_tensor(0, builder), builder) + ty = input.dtype.to_ir(builder) + _0 = tl.tensor(builder.get_null_value(ty), input.dtype) + return not_equal(input, _0, builder) else: + return tl.tensor(builder.create_int_cast(input.handle, + dst_ty.to_ir(builder), sign_extend), + dst_ty) + + # Casting standard floating types to integer types + if src_sca_ty.is_standard_floating() and dst_sca_ty.is_int(): + if dst_sca_ty.is_bool(): + ty = input.dtype.to_ir(builder) + _0 = tl.tensor(builder.get_null_value(ty), input.dtype) + return not_equal(input, _0, builder) + elif dst_sca_ty.is_int_signed(): return tl.tensor(builder.create_fp_to_si(input.handle, dst_ty.to_ir(builder)), dst_ty) + else: + return tl.tensor(builder.create_fp_to_ui(input.handle, + dst_ty.to_ir(builder)), + dst_ty) - # int => float - if src_sca_ty.is_int() and dst_sca_ty.is_floating(): + # Casting integer types to standard floating types + if src_sca_ty.is_int() and dst_sca_ty.is_standard_floating(): if src_sca_ty.is_bool() or not src_sca_ty.is_int_signed(): return tl.tensor(builder.create_ui_to_fp(input.handle, dst_ty.to_ir(builder)), @@ -665,7 +704,7 @@ def cast(input: tl.tensor, dst_ty.to_ir(builder)), dst_ty) - # ptr => int + # Casting pointer types to integer types if src_sca_ty.is_ptr() and dst_sca_ty.is_int(): bitwidth = dst_sca_ty.int_bitwidth if bitwidth == 64: @@ -676,19 +715,14 @@ def cast(input: tl.tensor, tl.tensor(builder.get_int64(0), tl.int64), builder) - if not src_sca_ty.is_ptr() and dst_sca_ty.is_ptr(): + # Casting integer types to pointer types + if src_sca_ty.is_int() and dst_sca_ty.is_ptr(): return tl.tensor(builder.create_int_to_ptr(input.handle, dst_ty.to_ir(builder)), dst_ty) - # Ptr . Ptr + + # Casting pointer types to pointer types if src_sca_ty.is_ptr() and dst_sca_ty.is_ptr(): return tl.tensor(builder.create_bitcast(input.handle, dst_ty.to_ir(builder)), dst_ty) - # * . 
Bool - if dst_sca_ty.is_bool(): - if src_sca_ty.is_ptr(): - input = cast(input, tl.int64, builder) - other = builder.get_int64(0) - if src_ty.is_bool(): - other = builder.create_splat(other, src_ty.get_block_shapes()) - return tl.tensor(builder.create_icmpNE(input.handle, other), dst_ty) + assert False, f'cannot cast {input} to {dst_ty}' # ===----------------------------------------------------------------------===// @@ -696,18 +730,6 @@ def cast(input: tl.tensor, # ===----------------------------------------------------------------------===// -def _parse_eviction_policy(eviction_policy): - eviction = ir.EVICTION_POLICY.NORMAL # default - if eviction_policy: - if eviction_policy == "evict_last": - eviction = ir.EVICTION_POLICY.EVICT_LAST - elif eviction_policy == "evict_first": - eviction = ir.EVICTION_POLICY.EVICT_FIRST - else: - raise ValueError(f"Eviction policy {eviction_policy} not supported") - return eviction - - def load(ptr: tl.tensor, mask: Optional[tl.tensor], other: Optional[tl.tensor], @@ -723,16 +745,18 @@ def load(ptr: tl.tensor, if other: other = broadcast_impl_shape(other, ptr.type.get_block_shapes(), builder) - if other: - other = cast(other, ptr.type.scalar.element_ty, builder) ptr_ty = ptr.type.scalar elt_ty = ptr_ty.element_ty + # treat bool* as tl.int8* if elt_ty == tl.int1: elt_ty = tl.int8 ptr_ty = tl.pointer_type(elt_ty, ptr_ty.address_space) ptr = cast(ptr, ptr_ty, builder) + if other: + other = cast(other, elt_ty, builder) + # cache modifier cache = ir.CACHE_MODIFIER.NONE # default if cache_modifier: @@ -744,7 +768,14 @@ def load(ptr: tl.tensor, raise ValueError(f"Cache modifier {cache_modifier} not supported") # eviction policy - eviction = _parse_eviction_policy(eviction_policy) + eviction = ir.EVICTION_POLICY.NORMAL # default + if eviction_policy: + if eviction_policy == "evict_last": + eviction = ir.EVICTION_POLICY.EVICT_LAST + elif eviction_policy == "evict_first": + eviction = ir.EVICTION_POLICY.EVICT_FIRST + else: + raise ValueError(f"Eviction policy {eviction_policy} not supported") if ptr.type.is_block(): shape = ptr.type.get_block_shapes() @@ -752,29 +783,22 @@ def load(ptr: tl.tensor, else: dst_ty = elt_ty - if not mask and not other: + if not mask: + if other: + raise ValueError("`other` cannot be provided without `mask`") return tl.tensor(builder.create_load(ptr.handle, cache, eviction, is_volatile), dst_ty) - if not mask: - raise ValueError("`other` cannot be provided without `mask`") - - if not other: - other_ir = ir.undef.get(elt_ty.to_ir(builder)) - if ptr.type.is_block(): - other_ir = builder.create_splat(other_ir, ptr.type.get_block_shapes()) - other = tl.tensor(other_ir, dst_ty) - - return tl.tensor(builder.create_masked_load(ptr.handle, - mask.handle, - other.handle, - cache, eviction, is_volatile), - dst_ty) + else: + return tl.tensor(builder.create_masked_load(ptr.handle, + mask.handle, + other.handle if other else None, + cache, eviction, is_volatile), + dst_ty) def store(ptr: tl.tensor, val: tl.tensor, mask: Optional[tl.tensor], - eviction_policy: str, builder: ir.builder) -> tl.tensor: if not ptr.type.scalar.is_ptr(): raise ValueError("Pointer argument of store instruction is " + ptr.type.__repr__()) @@ -786,20 +810,17 @@ def store(ptr: tl.tensor, elt_ty = ptr_ty.element_ty # treat bool* as tl.int8* if elt_ty == tl.int1: - # convert to bool first and then store as int8 - val = cast(val, tl.int1, builder) elt_ty = tl.int8 ptr_ty = tl.pointer_type(elt_ty, ptr_ty.address_space) ptr = cast(ptr, ptr_ty, builder) - # eviction policy - eviction 
= _parse_eviction_policy(eviction_policy) + # cast to target data-type val = cast(val, elt_ty, builder) if not mask: - return tl.tensor(builder.create_store(ptr.handle, val.handle, eviction), tl.void) + return tl.tensor(builder.create_store(ptr.handle, val.handle), tl.void) if not mask.type.scalar.is_bool(): raise ValueError("Mask must have boolean scalar type") - return tl.tensor(builder.create_masked_store(ptr.handle, val.handle, mask.handle, eviction), tl.void) + return tl.tensor(builder.create_masked_store(ptr.handle, val.handle, mask.handle), tl.void) ######### # atomic @@ -870,8 +891,8 @@ def atomic_max(ptr: tl.tensor, # return atomic_umin(i_ptr, i_val) if val < 0 i_val = bitcast(val, tl.int32, builder) i_ptr = bitcast(ptr, tl.pointer_type(tl.int32, 1), builder) - pos = greater_equal(val, tl.tensor(ir.constant_float.get(sca_ty.to_ir(builder), 0), sca_ty), builder) - neg = less_than(val, tl.tensor(ir.constant_float.get(sca_ty.to_ir(builder), 0), sca_ty), builder) + pos = greater_equal(val, tl.tensor(builder.get_float32(0), sca_ty), builder) + neg = less_than(val, tl.tensor(builder.get_float32(0), sca_ty), builder) pos_ret = tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.MAX, i_ptr.handle, i_val.handle, and_(mask, pos, builder).handle), i_val.type) neg_ret = tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.UMIN, i_ptr.handle, i_val.handle, and_(mask, neg, builder).handle), i_val.type) return where(pos, pos_ret, neg_ret, builder) @@ -902,8 +923,8 @@ def atomic_min(ptr: tl.tensor, # return atomic_umax(i_ptr, i_val) if val < 0 i_val = bitcast(val, tl.int32, builder) i_ptr = bitcast(ptr, tl.pointer_type(tl.int32, 1), builder) - pos = greater_equal(val, tl.tensor(ir.constant_float.get(sca_ty.to_ir(builder), 0), sca_ty), builder) - neg = less_than(val, tl.tensor(ir.constant_float.get(sca_ty.to_ir(builder), 0), sca_ty), builder) + pos = greater_equal(val, tl.tensor(builder.get_float32(0), sca_ty), builder) + neg = less_than(val, tl.tensor(builder.get_float32(0), sca_ty), builder) pos_ret = tl.tensor(builder.create_atomic_rmw(ir.ATOMIC_OP.MIN, i_ptr.handle, i_val.handle, @@ -963,31 +984,28 @@ def atomic_xchg(ptr: tl.tensor, # ===----------------------------------------------------------------------===// -def dot(a: tl.tensor, - b: tl.tensor, - trans_a: bool, - trans_b: bool, +def dot(lhs: tl.tensor, + rhs: tl.tensor, allow_tf32: bool, builder: ir.builder) -> tl.tensor: - in_a = 1 if not trans_a else 0 - in_b = 1 if trans_b else 0 - assert a.type.is_block() and b.type.is_block() - assert len(a.shape) == 2 and len(b.shape) == 2 - assert a.shape[in_a] == b.shape[in_b] - assert a.shape[0] >= 16 and a.shape[1] >= 16 and b.shape[1] >= 16,\ + assert lhs.type.is_block() and rhs.type.is_block() + assert len(lhs.shape) == 2 and len(rhs.shape) == 2 + assert lhs.shape[1].value == rhs.shape[0].value + assert lhs.shape[0].value >= 16 and lhs.shape[1].value >= 16 \ + and rhs.shape[1].value >= 16,\ "small blocks not supported!" 
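With the `trans_a`/`trans_b` flags removed from `dot`, any transposition now has to be written explicitly, for example through the `trans` op added to this file above (assuming it is exposed to kernels as `tl.trans`, which this hunk does not show). A hedged sketch of a kernel-level call after this change; block size, pointer arithmetic, and launch setup are illustrative assumptions:

import torch
import triton
import triton.language as tl

@triton.jit
def dot_kernel(a_ptr, b_ptr, c_ptr, BLOCK: tl.constexpr):
    offs = tl.arange(0, BLOCK)
    # row-major BLOCK x BLOCK tiles
    a = tl.load(a_ptr + offs[:, None] * BLOCK + offs[None, :])
    b = tl.load(b_ptr + offs[:, None] * BLOCK + offs[None, :])
    # previously expressed as dot(..., trans_b=True); the transpose is now explicit
    c = tl.dot(a, tl.trans(b))
    tl.store(c_ptr + offs[:, None] * BLOCK + offs[None, :], c)

BLOCK = 16  # dot() asserts every block dimension is at least 16
a = torch.randn((BLOCK, BLOCK), device="cuda", dtype=torch.float16)
b = torch.randn((BLOCK, BLOCK), device="cuda", dtype=torch.float16)
c = torch.empty((BLOCK, BLOCK), device="cuda", dtype=torch.float32)
dot_kernel[(1,)](a, b, c, BLOCK=BLOCK)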
- if a.type.scalar.is_int(): + if lhs.type.scalar.is_int(): _0 = builder.get_int32(0) ret_scalar_ty = tl.int32 else: _0 = builder.get_float32(0) ret_scalar_ty = tl.float32 - M = a.type.shape[in_a ^ 1] - N = b.type.shape[in_b ^ 1] + M = lhs.type.shape[0] + N = rhs.type.shape[1] _0 = builder.create_splat(_0, [M, N]) ret_ty = tl.block_type(ret_scalar_ty, [M, N]) - ret = builder.create_dot(a.handle, b.handle, _0, trans_a, trans_b, allow_tf32) - return tl.tensor(ret, ret_ty) + return tl.tensor(builder.create_dot(lhs.handle, rhs.handle, _0, allow_tf32), + ret_ty) # ===----------------------------------------------------------------------===// @@ -1010,11 +1028,11 @@ def where(condition: tl.tensor, ret_ty = x.type return tl.tensor(builder.create_select(condition.handle, x.handle, y.handle), ret_ty) - # ===----------------------------------------------------------------------===// # Reductions # ===----------------------------------------------------------------------=== + def reduce_impl(input: tl.tensor, axis: int, builder: ir.builder, name: str, FLOAT_OP: ir.REDUCE_OP, INT_OP: ir.REDUCE_OP) -> tl.tensor: scalar_ty = input.type.scalar @@ -1045,10 +1063,11 @@ def reduce_impl(input: tl.tensor, axis: int, builder: ir.builder, name: str, for i, s in enumerate(shape): if i != axis: ret_shape.append(s) - if len(ret_shape) == 0: - res_ty = scalar_ty - else: + if ret_shape: res_ty = tl.block_type(scalar_ty, ret_shape) + else: + # 0d-tensor -> scalar + res_ty = scalar_ty if scalar_ty.is_floating(): return tl.tensor(builder.create_reduce(input.handle, FLOAT_OP, axis), res_ty) @@ -1084,25 +1103,19 @@ def xor_sum(input: tl.tensor, axis: int, builder: ir.builder) -> tl.tensor: return reduce_impl(input, axis, builder, "sum", ir.REDUCE_OP.XOR, ir.REDUCE_OP.XOR) -# ----------------------- -# Utilities -# ----------------------- - -def clock(builder: ir.builder) -> tl.tensor: - return tl.tensor(builder.create_clock(), tl.int64) - - -def globaltimer(builder: ir.builder) -> tl.tensor: - return tl.tensor(builder.create_globaltimer, tl.int64) - - # ===----------------------------------------------------------------------=== # Math # ===----------------------------------------------------------------------=== def umulhi(x: tl.tensor, y: tl.tensor, builder: ir.builder) -> tl.tensor: x, y = binary_op_type_checking_impl(x, y, builder) - return tl.tensor(builder.create_umulhi(x.handle, y.handle), x.type) + from . import libdevice + return libdevice.mulhi(x, y, _builder=builder) + + +def floor(x: tl.tensor, builder: ir.builder) -> tl.tensor: + from . 
import libdevice + return libdevice.floor(x, _builder=builder) def exp(x: tl.tensor, builder: ir.builder) -> tl.tensor: @@ -1130,16 +1143,23 @@ def sqrt(x: tl.tensor, builder: ir.builder) -> tl.tensor: def multiple_of(x: tl.tensor, values: List[int]) -> tl.tensor: if len(x.shape) != len(values): raise ValueError("Shape of input to multiple_of does not match the length of values") - x.handle.multiple_of(values) + x.handle.set_attr("tt.divisibility", ir.make_attr(values, x.handle.get_context())) return x def max_contiguous(x: tl.tensor, values: List[int]) -> tl.tensor: if len(x.shape) != len(values): raise ValueError("Shape of input to max_contiguous does not match the length of values") - x.handle.max_contiguous(values) + x.handle.set_attr("tt.contiguity", ir.make_attr(values, x.handle.get_context())) return x def debug_barrier(builder: ir.builder) -> tl.tensor: - return tl.tensor(builder.create_barrier(''), tl.void) + return tl.tensor(builder.create_barrier(), tl.void) + + +def printf(prefix: str, args: List[tl.tensor], builder: ir.builder) -> tl.tensor: + new_args = [] + for arg in args: + new_args.append(arg.handle) + return tl.tensor(builder.create_printf(prefix, new_args), tl.void) diff --git a/python/triton/ops/__init__.py b/python/triton/ops/__init__.py index dcaed8ccfc36..a55cb08d7a37 100644 --- a/python/triton/ops/__init__.py +++ b/python/triton/ops/__init__.py @@ -1,5 +1,12 @@ -# flake8: noqa: F401 -#from .conv import _conv, conv +# from .conv import _conv, conv from . import blocksparse from .cross_entropy import _cross_entropy, cross_entropy from .matmul import _matmul, matmul + +__all__ = [ + "blocksparse", + "_cross_entropy", + "cross_entropy", + "_matmul", + "matmul", +] diff --git a/python/triton/ops/blocksparse/__init__.py b/python/triton/ops/blocksparse/__init__.py index df3353e1291d..6b24b5377fab 100644 --- a/python/triton/ops/blocksparse/__init__.py +++ b/python/triton/ops/blocksparse/__init__.py @@ -1,3 +1,7 @@ -# flake8: noqa: F401 from .matmul import matmul from .softmax import softmax + +__all__ = [ + "matmul", + "softmax", +] diff --git a/python/triton/ops/blocksparse/softmax.py b/python/triton/ops/blocksparse/softmax.py index 33223b72df2b..bb915be138fa 100644 --- a/python/triton/ops/blocksparse/softmax.py +++ b/python/triton/ops/blocksparse/softmax.py @@ -18,8 +18,8 @@ def num_warps(n): @triton.jit def _blocksparse_softmax_fwd( - Out, A, LUT, R, stride_xz, - extent, stride_zr, stride_hr, # relative attention + Out, A, stride_xz, LUT, + R, extent, stride_zr, stride_hr, # relative attention scale, is_causal, ROW_SIZE: tl.constexpr, BLOCK_SIZE: tl.constexpr, @@ -164,8 +164,8 @@ def forward( # enqueue kernel out = torch.empty_like(a) _blocksparse_softmax_fwd[grid]( - out, a, lut, rel_logits, a.stride(0), - rel_shape[-1], rel_strides[0], rel_strides[1], # relative attn + out, a, a.stride(0), lut, + rel_logits, rel_shape[-1], rel_strides[0], rel_strides[1], # relative attn scale, is_causal, BLOCK_SIZE=block, diff --git a/python/triton/ops/matmul_perf_model.py b/python/triton/ops/matmul_perf_model.py index 004f236b968c..c6e9c1af465f 100644 --- a/python/triton/ops/matmul_perf_model.py +++ b/python/triton/ops/matmul_perf_model.py @@ -10,7 +10,9 @@ def get_tensorcore_tflops(backend, device, num_ctas, num_warps, dtype): ''' return compute throughput in TOPS ''' total_warps = num_ctas * min(num_warps, 4) - num_subcores = _triton.runtime.num_sm(backend, device) * 4 # on recent GPUs + triton.compiler.init_cuda_utils() + + num_subcores = 
triton.compiler.cuda_utils.get_device_properties(device)["multiprocessor_count"] * 4 # on recent GPUs tflops = min(num_subcores, total_warps) / num_subcores * get_max_tensorcore_tflops(dtype, backend, device) return tflops @@ -18,14 +20,14 @@ def get_tensorcore_tflops(backend, device, num_ctas, num_warps, dtype): def get_simd_tflops(backend, device, num_ctas, num_warps, dtype): ''' return compute throughput in TOPS ''' total_warps = num_ctas * min(num_warps, 4) - num_subcores = _triton.runtime.num_sm(backend, device) * 4 # on recent GPUs + num_subcores = triton.compiler.cuda_utils.get_device_properties(device)["multiprocessor_count"] * 4 # on recent GPUs tflops = min(num_subcores, total_warps) / num_subcores * get_max_simd_tflops(dtype, backend, device) return tflops def get_tflops(backend, device, num_ctas, num_warps, dtype): - cc = _triton.runtime.cc(backend, device) - if cc < 80 and dtype == torch.float32: + capability = torch.cuda.get_device_capability(device) + if capability[0] < 8 and dtype == torch.float32: return get_simd_tflops(backend, device, num_ctas, num_warps, dtype) return get_tensorcore_tflops(backend, device, num_ctas, num_warps, dtype) @@ -59,7 +61,7 @@ def estimate_matmul_time( compute_ms = total_ops / tput # time to load data - num_sm = _triton.runtime.num_sm(backend, device) + num_sm = triton.compiler.cuda_utils.get_device_properties(device)["multiprocessor_count"] active_cta_ratio = min(1, num_ctas / num_sm) active_cta_ratio_bw1 = min(1, num_ctas / 32) # 32 active ctas are enough to saturate active_cta_ratio_bw2 = max(min(1, (num_ctas - 32) / (108 - 32)), 0) # 32-108, remaining 5% @@ -97,9 +99,8 @@ def estimate_matmul_time( def early_config_prune(configs, named_args): - backend = _triton.runtime.backend.CUDA device = torch.cuda.current_device() - cc = _triton.runtime.cc(backend, device) + capability = torch.cuda.get_device_capability() # BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps, num_stages dtsize = named_args['A'].element_size() dtype = named_args['A'].dtype @@ -110,7 +111,10 @@ def early_config_prune(configs, named_args): kw = config.kwargs BLOCK_M, BLOCK_N, BLOCK_K, num_stages = \ kw['BLOCK_M'], kw['BLOCK_N'], kw['BLOCK_K'], config.num_stages - max_shared_memory = _triton.runtime.max_shared_memory(backend, device) + + # TODO: move to `cuda_utils` submodule + triton.compiler.init_cuda_utils() + max_shared_memory = triton.compiler.cuda_utils.get_device_properties(device)["max_shared_mem"] required_shared_memory = (BLOCK_M + BLOCK_N) * BLOCK_K * num_stages * dtsize if required_shared_memory <= max_shared_memory: pruned_configs.append(config) @@ -136,7 +140,7 @@ def early_config_prune(configs, named_args): pruned_configs = [] for k, v in configs_map.items(): BLOCK_M, BLOCK_N, BLOCK_K, SPLIT_K, num_warps = k - if cc >= 80: + if capability[0] >= 8: # compute cycles (only works for ampere GPUs) mmas = BLOCK_M * BLOCK_N * BLOCK_K / (16 * 8 * 16) mma_cycles = mmas / min(4, num_warps) * 8 diff --git a/python/triton/runtime/__init__.py b/python/triton/runtime/__init__.py index d9946c27cf02..bb7066eb5b66 100644 --- a/python/triton/runtime/__init__.py +++ b/python/triton/runtime/__init__.py @@ -1,2 +1,12 @@ -from .autotuner import Config, Heuristics, autotune, heuristics # noqa: F401 -from .jit import JITFunction, KernelInterface, version_key # noqa: F401 +from .autotuner import Config, Heuristics, autotune, heuristics +from .jit import JITFunction, KernelInterface, version_key + +__all__ = [ + "Config", + "Heuristics", + "autotune", + "heuristics", + "JITFunction", + 
"KernelInterface", + "version_key", +] diff --git a/python/triton/runtime/autotuner.py b/python/triton/runtime/autotuner.py index 8ec16c477d45..4dc21c57ef58 100644 --- a/python/triton/runtime/autotuner.py +++ b/python/triton/runtime/autotuner.py @@ -4,6 +4,7 @@ import time from typing import Dict +from ..compiler import OutOfResources from ..testing import do_bench from .jit import KernelInterface @@ -60,7 +61,10 @@ def kernel_call(): config.pre_hook(self.nargs) self.hook(args) self.fn.run(*args, num_warps=config.num_warps, num_stages=config.num_stages, **current) - return do_bench(kernel_call) + try: + return do_bench(kernel_call) + except OutOfResources: + return float('inf') def run(self, *args, **kwargs): self.nargs = dict(zip(self.arg_names, args)) @@ -118,7 +122,6 @@ def warmup(self, *args, **kwargs): class Config: """ An object that represents a possible kernel configuration for the auto-tuner to try. - :ivar meta: a dictionary of meta-parameters to pass to the kernel as keyword arguments. :type meta: dict[Str, Any] :ivar num_warps: the number of warps to use for the kernel when compiled for GPUs. For example, if @@ -150,10 +153,8 @@ def __str__(self): def autotune(configs, key, prune_configs_by=None, reset_to_zero=None): """ Decorator for auto-tuning a :code:`triton.jit`'d function. - .. highlight:: python .. code-block:: python - @triton.autotune(configs=[ triton.Config(meta={'BLOCK_SIZE': 128}, num_warps=4), triton.Config(meta={'BLOCK_SIZE': 1024}, num_warps=8), @@ -164,12 +165,10 @@ def autotune(configs, key, prune_configs_by=None, reset_to_zero=None): @triton.jit def kernel(x_ptr, x_size, **META): BLOCK_SIZE = META['BLOCK_SIZE'] - :note: When all the configurations are evaluated, the kernel will run multiple time. This means that whatever value the kernel updates will be updated multiple times. To avoid this undesired behavior, you can use the `reset_to_zero` argument, which reset the value of the provided tensor to `zero` before running any configuration. - :param configs: a list of :code:`triton.Config` objects :type configs: list[triton.Config] :param key: a list of argument names whose change in value will trigger the evaluation of all provided configs. @@ -204,16 +203,12 @@ def heuristics(values): """ Decorator for specifying how the values of certain meta-parameters may be computed. This is useful for cases where auto-tuning is prohibitevely expensive, or just not applicable. - .. highlight:: python .. code-block:: python - @triton.heuristics(values={'BLOCK_SIZE': lambda args: 2 ** int(math.ceil(math.log2(args[1])))}) @triton.jit def kernel(x_ptr, x_size, **META): BLOCK_SIZE = META['BLOCK_SIZE'] # smallest power-of-two >= x_size - - .param values: a dictionary of meta-parameter names and functions that compute the value of the meta-parameter. each such function takes a list of positional arguments as input. 
.type values: dict[str, Callable[[list[Any]], Any]] diff --git a/python/triton/runtime/jit.py b/python/triton/runtime/jit.py index 5a234afc2b49..ab1a5516f5f8 100644 --- a/python/triton/runtime/jit.py +++ b/python/triton/runtime/jit.py @@ -8,6 +8,7 @@ import subprocess import textwrap from collections import defaultdict, namedtuple +from typing import Callable, Generic, Iterable, Optional, TypeVar, Union, cast, overload import torch @@ -19,6 +20,9 @@ except ImportError: get_cuda_stream = lambda dev_idx: torch.cuda.current_stream(dev_idx).cuda_stream + +T = TypeVar('T') + # ----------------------------------------------------------------------------- # Dependencies Finder # ----------------------------------------------------------------------------- @@ -94,20 +98,19 @@ def version_key(): return '-'.join(triton.__version__) + '-' + ptxas_version + '-' + '-'.join(contents) -class KernelInterface: +class KernelInterface(Generic[T]): + run: T - def __getitem__(self, grid): + def __getitem__(self, grid) -> T: """ A JIT function is launched with: fn[grid](*args, **kwargs). Hence JITFunction.__getitem__ returns a callable proxy that memorizes the grid. """ - def launcher(*args, **kwargs): - return self.run(*args, grid=grid, **kwargs) - return launcher + return cast(T, functools.partial(cast(Callable, self.run), grid=grid)) -class JITFunction(KernelInterface): +class JITFunction(KernelInterface[T]): # Hook for inspecting compiled functions and modules cache_hook = None @@ -152,8 +155,8 @@ def is_divisible_by_16(x): if x is None: return True return False - divisible_by_16 = [i for i, arg in enumerate(args) if is_divisible_by_16(arg) and i not in self.do_not_specialize] - equal_to_1 = [i for i, arg in enumerate(args) if isinstance(arg, int) and arg == 1 and i not in self.do_not_specialize] + divisible_by_16 = {i for i, arg in enumerate(args) if is_divisible_by_16(arg) and i not in self.do_not_specialize} + equal_to_1 = {i for i, arg in enumerate(args) if isinstance(arg, int) and arg == 1 and i not in self.do_not_specialize} return namedtuple("instance_descriptor", ["divisible_by_16", "equal_to_1"])(tuple(divisible_by_16), tuple(equal_to_1)) # return _triton.code_gen.instance_descriptor(divisible_by_16, equal_to_1) @@ -177,6 +180,9 @@ def _type_of(key): triton.language.uint32: 'u32', triton.language.uint64: 'u64', triton.language.float8: 'fp8', + triton.language.float16: 'fp16', + triton.language.bfloat16: 'bf16', + triton.language.float32: 'fp32', }[key] return f'*{ty}' if key is None: @@ -272,7 +278,7 @@ def {self.fn.__name__}({', '.join(self.arg_names)}, grid, num_warps=4, num_stage if callable(arg): raise TypeError(f"Callable constexpr at index {{i}} is not supported") if not self._call_hook(key, signature, device, constants, num_warps, num_stages, extern_libs, configs): - bin = triton.compile(self, signature, device, constants, num_warps, num_stages, extern_libs=extern_libs, configs=configs) + bin = triton.compile(self, signature=signature, device=device, constants=constants, num_warps=num_warps, num_stages=num_stages, extern_libs=extern_libs, configs=configs) if not warmup: bin.c_wrapper(grid_0, grid_1, grid_2, bin.num_warps, bin.shared, stream, bin.cu_function, triton.compiler.CompiledKernel.launch_enter_hook, triton.compiler.CompiledKernel.launch_exit_hook, bin, *args) self.cache[device][key] = bin @@ -364,29 +370,55 @@ def __repr__(self): # ----------------------------------------------------------------------------- -def jit(*args, **kwargs): +@overload +def jit(fn: T) -> JITFunction[T]: + 
... + + +@overload +def jit( + *, + version=None, + do_not_specialize: Optional[Iterable[int]] = None, +) -> Callable[[T], JITFunction[T]]: + ... + + +def jit( + fn: Optional[T] = None, + *, + version=None, + do_not_specialize: Optional[Iterable[int]] = None, +) -> Union[JITFunction[T], Callable[[T], JITFunction[T]]]: """ Decorator for JIT-compiling a function using the Triton compiler. - :note: When a jit'd function is called, :code:`torch.tensor` arguments are implicitly converted to pointers using the :code:`.data_ptr()` method. + :note: When a jit'd function is called, :code:`torch.tensor` arguments are + implicitly converted to pointers using the :code:`.data_ptr()` method. :note: This function will be compiled and run on the GPU. It will only have access to: * python primitives, - * objects within the triton.language package, + * builtins within the triton package, * arguments to this function, * other jit'd functions :param fn: the function to be jit-compiled :type fn: Callable """ - if args: - assert len(args) == 1 - assert callable(args[0]) - return JITFunction(args[0], **kwargs) + + def decorator(fn: T) -> JITFunction[T]: + assert callable(fn) + return JITFunction( + fn, + version=version, + do_not_specialize=do_not_specialize, + ) + + if fn is not None: + return decorator(fn) + else: - def decorator(fn): - return JITFunction(fn, **kwargs) return decorator diff --git a/python/triton/testing.py b/python/triton/testing.py index c83c1e6824af..f277ec1406a8 100644 --- a/python/triton/testing.py +++ b/python/triton/testing.py @@ -16,6 +16,9 @@ _cutlass = None has_cutlass = False +# TODO: move to separate module +import triton + def catch_oor(kernel, pytest_handle=None): try: @@ -34,12 +37,12 @@ def sparsify_tensor(x, mask, block): return ret -def make_pair(shape, device="cuda", alpha=1e-2, beta=0., trans=False, data=None): +def make_pair(shape, device="cuda", alpha=1e-2, beta=0., trans=False, data=None, dtype=torch.float32): if data is None: - data = torch.randn(shape, dtype=torch.float32, device=device) + data = torch.randn(shape, dtype=torch.float32, requires_grad=True, device=device) ref_ret = data ref_ret = ref_ret * alpha + beta - ref_ret = ref_ret.half().float() + ref_ret = ref_ret.half().to(dtype) if trans: ref_ret = ref_ret.t().requires_grad_() ref_ret = ref_ret.detach().requires_grad_() @@ -336,8 +339,8 @@ def get_dram_gbps(backend=None, device=None): backend = _triton.runtime.backend.CUDA if not device: device = torch.cuda.current_device() - mem_clock_khz = _triton.runtime.memory_clock_rate(backend, device) - bus_width = _triton.runtime.global_memory_bus_width(backend, device) + mem_clock_khz = triton.compiler.cuda_utils.get_device_properties(device)["mem_clock_rate"] # in kHz + bus_width = triton.compiler.cuda_utils.get_device_properties(device)["mem_bus_width"] bw_gbps = mem_clock_khz * bus_width * 2 / 1e6 / 8 # In GB/s return bw_gbps @@ -347,11 +350,13 @@ def get_max_tensorcore_tflops(dtype: torch.dtype, backend=None, device=None, clo backend = _triton.runtime.backend.CUDA if not device: device = torch.cuda.current_device() - num_subcores = _triton.runtime.num_sm(backend, device) * 4 # on recent GPUs + + triton.compiler.init_cuda_utils() + num_subcores = triton.compiler.cuda_utils.get_device_properties(device)["multiprocessor_count"] * 4 if not clock_rate: - clock_rate = _triton.runtime.clock_rate(backend, device) # in kHz - cc = _triton.runtime.cc(backend, device) - if cc < 80: + clock_rate = triton.compiler.cuda_utils.get_device_properties(device)["sm_clock_rate"] # in 
kHz + capability = torch.cuda.get_device_capability(device) + if capability[0] < 8: assert dtype == torch.float16 ops_per_sub_core = 256 # 2 4x4x4 Tensor Cores else: diff --git a/python/triton/tools/aot.py b/python/triton/tools/aot.py new file mode 100644 index 000000000000..7b5a59fe0d0c --- /dev/null +++ b/python/triton/tools/aot.py @@ -0,0 +1,61 @@ +import argparse + +import triton +import triton._C.libtriton.triton as libtriton + +if __name__ == '__main__': + + # valid source and target formats + VALID_FORMATS = ['triton-ir', 'triton-gpu-ir', 'llvm-ir', 'ptx'] + + # set up the argument parser + # TODO: conditional requirements + parser = argparse.ArgumentParser() + parser.add_argument('src', help="Source file to compile") + parser.add_argument('--target', required=True, + help="Target format, one of: " + ', '.join(VALID_FORMATS)) + parser.add_argument('--sm', type=int, help="Compute capability to compile for") + parser.add_argument('--ptx-version', type=int, help="PTX version to compile for") + + # parse the args + args = parser.parse_args() + + # TODO: clean-up and re-use triton.compiler primitive functions + # check for validity of format arguments + if args.target not in VALID_FORMATS: + print("Invalid target format: " + args.target) + exit(0) + + # parse source file to MLIR module + context = libtriton.ir.context() + module = libtriton.ir.parse_mlir_module(args.src, context) + module.context = context + + # optimizer triton-ir + module = triton.compiler.optimize_triton_ir(module) + if args.target == 'triton-ir': + print(module.str()) + exit(0) + + if not args.sm: + raise argparse.ArgumentError(None, "Must specify --sm for PTX compilation") + + # triton-ir -> triton-gpu-ir + module = triton.compiler.ttir_to_ttgir(module, num_warps=4, num_stages=3, compute_capability=args.sm) + if args.target == 'triton-gpu-ir': + print(module.str()) + exit(0) + + # triton-gpu-ir -> llvm-ir + module = triton.compiler.ttgir_to_llir(module, extern_libs=None, compute_capability=args.sm) + if args.target == 'llvm-ir': + print(module) + exit(0) + + if not args.ptx_version: + raise argparse.ArgumentError(None, "Must specify --ptx-version for PTX compilation") + + # llvm-ir -> ptx + module = triton.compiler.llir_to_ptx(module, compute_capability=args.sm, ptx_version=args.ptx_version) + assert args.target == 'ptx' + print(module) diff --git a/python/triton/tools/build_extern.py b/python/triton/tools/build_extern.py index f4141c31f13d..47b7b8846508 100644 --- a/python/triton/tools/build_extern.py +++ b/python/triton/tools/build_extern.py @@ -21,7 +21,6 @@ def __init__( ) -> None: ''' A symbol is a function declaration. - :param name: name of the symbol :param op_name: name of the operation :param ret_type: return type of the operation @@ -65,9 +64,9 @@ def convert_type(type_str) -> Optional[str]: elif type_str == "u64": return "uint64" elif type_str == "float": - return "float32" + return "fp32" elif type_str == "double": - return "float64" + return "fp64" else: # ignore other types, such as pointer types return None @@ -98,7 +97,6 @@ def __init__( ) -> None: ''' Abstract class for extern library. - :param name: name of the library :param path: path of the library :param format: whether to format the generated stub file @@ -154,7 +152,6 @@ class Libdevice(ExternLibrary): def __init__(self, path) -> None: ''' Constructor for Libdevice. 
- :param path: path of the libdevice library ''' super().__init__("libdevice", path) @@ -177,7 +174,6 @@ def _extract_symbol(self, line) -> Optional[Symbol]: func_strs = func_str.split("(") func_name = func_strs[0].replace("@", "") op_name = func_name.replace("__nv_", "") - # To filter some interfaces unlisted in NVIDIA's official documents. if 'ieee' in op_name: return None # Get arg_types @@ -310,8 +306,8 @@ def _output_stubs(self) -> str: for symbol in symbols: arg_type_symbol_dict_str += "(" for arg_type in symbol.arg_types: - arg_type_symbol_dict_str += f"core.{arg_type}," - ret_type = f"core.{symbol.ret_type}" + arg_type_symbol_dict_str += f'core.dtype("{arg_type}"),' + ret_type = f'core.dtype("{symbol.ret_type}")' arg_type_symbol_dict_str += "): (\"" + symbol.name + "\", " + ret_type + "),\n" arg_type_symbol_dict_str += "}" @@ -331,7 +327,6 @@ class LLVMDisassembler: def __init__(self, path) -> None: ''' Invoke llvm-dis to disassemble the given file. - :param path: path to llvm-dis ''' self._path = path @@ -361,7 +356,6 @@ def build( ) -> None: ''' Interface function to build the library file. - :param llvm_dis_path: path to the llvm-dis binary :param lib_path: path to the external library file :param lib_name: name of the library diff --git a/python/triton/tools/compare_asm.py b/python/triton/tools/compare_asm.py deleted file mode 100644 index e612022bd0d2..000000000000 --- a/python/triton/tools/compare_asm.py +++ /dev/null @@ -1,76 +0,0 @@ -''' -Compare cached triton kernels in 2 directories. - -example: -python compare_asm.py --dir0=triton-works/ --dir1=triton-fails/ --asm=ttir \ - --diff-out0=diff-works.ll --diff-out1=diff-fails.ll -''' -import argparse -import os -import pickle - -parser = argparse.ArgumentParser(description="unpickle") -parser.add_argument('--dir0', dest='dir0', required=True, - help="Triton cache dir 0") -parser.add_argument('--dir1', dest='dir1', required=True, - help="Triton cache dir 1") -parser.add_argument('--asm', dest='asm', - choices=['ttir', 'llir', 'ptx', 'cubin'], required=True) -parser.add_argument('--early-stop', dest='early_stop', action='store_true', - help="Stop after first diff") -parser.set_defaults(early_stop=True) -parser.add_argument('--diff-out0', dest='diff_out0', required=True, - help="output file path for kernels in dir0") -parser.add_argument('--diff-out1', dest='diff_out1', required=True, - help="output file path for kernels in dir1") -args = parser.parse_args() -dir0 = args.dir0 -dir1 = args.dir1 -asm = args.asm - -dir0_files = {} -dir1_files = {} -for root, _, files in os.walk(dir0): - for file in files: - if not file.endswith('.lock'): - path = os.path.join(root, file) - with open(path, 'rb') as f: - loaded_file = pickle.load(f) - bin = loaded_file['binary'] - key = loaded_file['key'] - info = key.split('-')[-3:] # num_warps, num_stages, signature - dict_key = bin.name + '-'.join(info) - dir0_files[dict_key] = bin.asm - -for root, _, files in os.walk(dir1): - for file in files: - if not file.endswith('.lock'): - path = os.path.join(root, file) - with open(path, 'rb') as f: - loaded_file = pickle.load(f) - bin = loaded_file['binary'] - key = loaded_file['key'] - info = key.split('-')[-3:] # num_warps, num_stages, signature - dict_key = bin.name + '-'.join(info) - dir1_files[dict_key] = bin.asm - -diff_keys = [] -for key in dir0_files: - asm0 = dir0_files[key] - if key not in dir1_files: - continue - asm1 = dir1_files[key] - if asm0[asm] != asm1[asm]: - diff_keys.append(key) - -if args.early_stops: - diff_keys = diff_keys[:1] -if 
diff_keys: - with open(args.diff_out0, 'w') as f0, open(args.diff_out1, 'w') as f1: - for key in diff_keys: - f0.write(f'{asm} mismatch at {key}') - f0.write(dir0_files[key][asm]) - f0.write('\n') - f1.write(f'{asm} mismatch at {key}') - f1.write(dir1_files[key][asm]) - f1.write('\n') diff --git a/python/tutorials/02-fused-softmax.py b/python/tutorials/02-fused-softmax.py index 7447b60af4bf..388604ee100c 100644 --- a/python/tutorials/02-fused-softmax.py +++ b/python/tutorials/02-fused-softmax.py @@ -80,7 +80,7 @@ def softmax_kernel( row = tl.load(input_ptrs, mask=col_offsets < n_cols, other=-float('inf')) # Subtract maximum for numerical stability row_minus_max = row - tl.max(row, axis=0) - # Note that exponentials in Triton are fast but approximate (i.e., think __expf in CUDA) + # Note that exponentiation in Triton is fast but approximate (i.e., think __expf in CUDA) numerator = tl.exp(row_minus_max) denominator = tl.sum(numerator, axis=0) softmax_output = numerator / denominator @@ -188,4 +188,4 @@ def benchmark(M, N, provider): # # - Triton is 4x faster than the Torch JIT. This confirms our suspicions that the Torch JIT does not do any fusion here. # - Triton is noticeably faster than :code:`torch.softmax` -- in addition to being **easier to read, understand and maintain**. -# Note however that the PyTorch `softmax` operation is more general and will works on tensors of any shape. +# Note however that the PyTorch `softmax` operation is more general and will work on tensors of any shape. diff --git a/python/tutorials/03-matrix-multiplication.py b/python/tutorials/03-matrix-multiplication.py index 49382aecd7d6..f11c3bc09dfe 100644 --- a/python/tutorials/03-matrix-multiplication.py +++ b/python/tutorials/03-matrix-multiplication.py @@ -156,16 +156,7 @@ @triton.autotune( configs=[ - triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=3, num_warps=8), - triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=3, num_warps=8), - triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), - triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), - triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), - triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), - triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), - triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=4, num_warps=4), - triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2), - triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64, 'BLOCK_SIZE_K': 32, 'GROUP_SIZE_M': 8}, num_stages=5, num_warps=2), + triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256, 'BLOCK_SIZE_K': 64, 'GROUP_SIZE_M': 8}, num_stages=3, num_warps=8), ], key=['M', 'N', 'K'], ) @@ -236,8 +227,8 @@ def matmul_kernel( b_ptrs += BLOCK_SIZE_K * stride_bk # you can fuse arbitrary activation functions here # while the accumulator is still in FP32! 
- if ACTIVATION == "leaky_relu": - accumulator = leaky_relu(accumulator) + if ACTIVATION: + accumulator = ACTIVATION(accumulator) c = accumulator.to(tl.float16) # ----------------------------------------------------------- @@ -252,7 +243,6 @@ def matmul_kernel( # we can fuse `leaky_relu` by providing it as an `ACTIVATION` meta-parameter in `_matmul` @triton.jit def leaky_relu(x): - x = x + 1 return tl.where(x >= 0, x, 0.01 * x) @@ -261,7 +251,7 @@ def leaky_relu(x): # and (1) checks any shape constraint; (2) allocates the output; (3) launches the above kernel -def matmul(a, b, activation=""): +def matmul(a, b, activation=None): # checks constraints assert a.shape[1] == b.shape[0], "incompatible dimensions" assert a.is_contiguous(), "matrix A must be contiguous" @@ -297,7 +287,7 @@ def matmul(a, b, activation=""): torch.manual_seed(0) a = torch.randn((512, 512), device='cuda', dtype=torch.float16) b = torch.randn((512, 512), device='cuda', dtype=torch.float16) -triton_output = matmul(a, b) +triton_output = matmul(a, b, activation=None) torch_output = torch.matmul(a, b) print(f"triton_output={triton_output}") print(f"torch_output={torch_output}") @@ -319,13 +309,13 @@ def matmul(a, b, activation=""): triton.testing.Benchmark( x_names=['M', 'N', 'K'], # argument names to use as an x-axis for the plot x_vals=[ - 128 * i for i in range(2, 33) + 8192 ], # different possible values for `x_name` line_arg='provider', # argument name whose value corresponds to a different line in the plot # possible values for `line_arg`` - line_vals=['cublas', 'cublas + relu', 'triton', 'triton + relu'], + line_vals=['cublas', 'triton'], # label name for the lines - line_names=["cuBLAS", "cuBLAS (+ torch.nn.LeakyReLU)", "Triton", "Triton (+ LeakyReLU)"], + line_names=["cuBLAS", "Triton"], # line styles styles=[('green', '-'), ('green', '--'), ('blue', '-'), ('blue', '--')], ylabel="TFLOPS", # label name for the y-axis @@ -337,18 +327,9 @@ def benchmark(M, N, K, provider): a = torch.randn((M, K), device='cuda', dtype=torch.float16) b = torch.randn((K, N), device='cuda', dtype=torch.float16) if provider == 'cublas': - ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.matmul(a, b)) + ms, min_ms, max_ms = triton.testing.do_bench(lambda: torch.matmul(a, b), rep=100) if provider == 'triton': - ms, min_ms, max_ms = triton.testing.do_bench(lambda: matmul(a, b)) - if provider == 'cublas + relu': - torch_relu = torch.nn.ReLU(inplace=True) - ms, min_ms, max_ms = triton.testing.do_bench( - lambda: torch_relu(torch.matmul(a, b)) - ) - if provider == 'triton + relu': - ms, min_ms, max_ms = triton.testing.do_bench( - lambda: matmul(a, b, activation="leaky_relu") - ) + ms, min_ms, max_ms = triton.testing.do_bench(lambda: matmul(a, b), rep=100) perf = lambda ms: 2 * M * N * K * 1e-12 / (ms * 1e-3) return perf(ms), perf(max_ms), perf(min_ms) diff --git a/python/tutorials/05-layer-norm.py b/python/tutorials/05-layer-norm.py index 333cb80ec69c..4a8688736aa9 100644 --- a/python/tutorials/05-layer-norm.py +++ b/python/tutorials/05-layer-norm.py @@ -19,8 +19,8 @@ @triton.jit def _layer_norm_fwd_fused( - Out, A, + Out, Weight, Bias, Mean, Rstd, @@ -36,14 +36,14 @@ def _layer_norm_fwd_fused( _mean = tl.zeros([BLOCK_SIZE], dtype=tl.float32) for off in range(0, N, BLOCK_SIZE): cols = off + tl.arange(0, BLOCK_SIZE) - a = tl.load(A + cols, mask=cols < N, other=0., eviction_policy="evict_last").to(tl.float32) + a = tl.load(A + cols, mask=cols < N, other=0.).to(tl.float32) _mean += a mean = tl.sum(_mean, axis=0) / N # compute variance 
_var = tl.zeros([BLOCK_SIZE], dtype=tl.float32) for off in range(0, N, BLOCK_SIZE): cols = off + tl.arange(0, BLOCK_SIZE) - a = tl.load(A + cols, mask=cols < N, other=0., eviction_policy="evict_last").to(tl.float32) + a = tl.load(A + cols, mask=cols < N, other=0.).to(tl.float32) a = tl.where(cols < N, a - mean, 0.) _var += a * a var = tl.sum(_var, axis=0) / N @@ -57,192 +57,155 @@ def _layer_norm_fwd_fused( mask = cols < N weight = tl.load(Weight + cols, mask=mask) bias = tl.load(Bias + cols, mask=mask) - a = tl.load(A + cols, mask=mask, other=0., eviction_policy="evict_first").to(tl.float32) + a = tl.load(A + cols, mask=mask, other=0.).to(tl.float32) a_hat = (a - mean) * rstd out = a_hat * weight + bias # # write-back tl.store(Out + cols, out, mask=mask) -# Backward pass (DA + partial DW + partial DB) - +# Backward pass (DX + partial DW + partial DB) @triton.jit -def _layer_norm_bwd_dx_fused( - _DA, - _DOut, - _A, - Weight, - Mean, Rstd, - stride, NumRows, NumCols, eps, - BLOCK_SIZE_N: tl.constexpr, -): +def _layer_norm_bwd_dx_fused(DX, DY, DW, DB, X, W, B, M, V, Lock, stride, N, eps, + GROUP_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr): # position of elements processed by this program - pid = tl.program_id(0) - row = pid - A = _A + row * stride - DOut = _DOut + row * stride - DA = _DA + row * stride - mean = tl.load(Mean + row) - rstd = tl.load(Rstd + row) + row = tl.program_id(0) + cols = tl.arange(0, BLOCK_SIZE_N) + mask = cols < N + # offset data pointers to start at the row of interest + X += row * stride + DY += row * stride + DX += row * stride + # offset locks and weight/bias gradient pointer + # each kernel instance accumulates partial sums for + # DW and DB into one of GROUP_SIZE_M independent buffers + # these buffers stay in the L2, which allow this kernel + # to be fast + lock_id = row % GROUP_SIZE_M + Lock += lock_id + Count = Lock + GROUP_SIZE_M + DW = DW + lock_id * N + cols + DB = DB + lock_id * N + cols # load data to SRAM - _mean1 = tl.zeros([BLOCK_SIZE_N], dtype=tl.float32) - _mean2 = tl.zeros([BLOCK_SIZE_N], dtype=tl.float32) - for off in range(0, NumCols, BLOCK_SIZE_N): - cols = off + tl.arange(0, BLOCK_SIZE_N) - mask = cols < NumCols - a = tl.load(A + cols, mask=mask, other=0).to(tl.float32) - dout = tl.load(DOut + cols, mask=mask, other=0).to(tl.float32) - weight = tl.load(Weight + cols, mask=mask, other=0).to(tl.float32) - a_hat = (a - mean) * rstd - wdout = weight * dout - _mean1 += a_hat * wdout - _mean2 += wdout - mean1 = tl.sum(_mean1, axis=0) / NumCols - mean2 = 0. - mean2 = tl.sum(_mean2, axis=0) / NumCols - for off in range(0, NumCols, BLOCK_SIZE_N): - cols = off + tl.arange(0, BLOCK_SIZE_N) - mask = cols < NumCols - a = tl.load(A + cols, mask=mask, other=0).to(tl.float32) - dout = tl.load(DOut + cols, mask=mask, other=0).to(tl.float32) - weight = tl.load(Weight + cols, mask=mask, other=0).to(tl.float32) - a_hat = (a - mean) * rstd - wdout = weight * dout - da = (wdout - (a_hat * mean1 + mean2)) * rstd - # write-back dx - tl.store(DA + cols, da, mask=mask) - + x = tl.load(X + cols, mask=mask, other=0).to(tl.float32) + dy = tl.load(DY + cols, mask=mask, other=0).to(tl.float32) + w = tl.load(W + cols, mask=mask).to(tl.float32) + mean = tl.load(M + row) + rstd = tl.load(V + row) + # compute dx + xhat = (x - mean) * rstd + wdy = w * dy + xhat = tl.where(mask, xhat, 0.) + wdy = tl.where(mask, wdy, 0.) 
+ mean1 = tl.sum(xhat * wdy, axis=0) / N + mean2 = tl.sum(wdy, axis=0) / N + dx = (wdy - (xhat * mean1 + mean2)) * rstd + # write-back dx + tl.store(DX + cols, dx, mask=mask) + # accumulate partial sums for dw/db + partial_dw = (dy * xhat).to(w.dtype) + partial_db = (dy).to(w.dtype) + while tl.atomic_cas(Lock, 0, 1) == 1: + pass + count = tl.load(Count) + # first store doesn't accumulate + if count == 0: + tl.atomic_xchg(Count, 1) + else: + partial_dw += tl.load(DW, mask=mask) + partial_db += tl.load(DB, mask=mask) + tl.store(DW, partial_dw, mask=mask) + tl.store(DB, partial_db, mask=mask) + # release lock + tl.atomic_xchg(Lock, 0) # Backward pass (total DW + total DB) + + @triton.jit -def _layer_norm_bwd_dwdb( - A, DOut, - Mean, Var, - DW, - DB, - M, N, - BLOCK_SIZE_M: tl.constexpr, - BLOCK_SIZE_N: tl.constexpr, -): +def _layer_norm_bwd_dwdb(DW, DB, FINAL_DW, FINAL_DB, M, N, + BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr): pid = tl.program_id(0) cols = pid * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N) dw = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) db = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32) - UNROLL: tl.constexpr = 4 - for i in range(0, M, BLOCK_SIZE_M * UNROLL): - for j in range(UNROLL): - rows = i + j * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M) - mask = (rows[:, None] < M) & (cols[None, :] < N) - offs = rows[:, None] * N + cols[None, :] - a = tl.load(A + offs, mask=mask, other=0.).to(tl.float32) - dout = tl.load(DOut + offs, mask=mask, other=0.).to(tl.float32) - mean = tl.load(Mean + rows, mask=rows < M, other=0.) - rstd = tl.load(Var + rows, mask=rows < M, other=0.) - a_hat = (a - mean[:, None]) * rstd[:, None] - dw += dout * a_hat - db += dout + for i in range(0, M, BLOCK_SIZE_M): + rows = i + tl.arange(0, BLOCK_SIZE_M) + mask = (rows[:, None] < M) & (cols[None, :] < N) + offs = rows[:, None] * N + cols[None, :] + dw += tl.load(DW + offs, mask=mask, other=0.) + db += tl.load(DB + offs, mask=mask, other=0.) 
sum_dw = tl.sum(dw, axis=0) sum_db = tl.sum(db, axis=0) - tl.store(DW + cols, sum_dw, mask=cols < N) - tl.store(DB + cols, sum_db, mask=cols < N) + tl.store(FINAL_DW + cols, sum_dw, mask=cols < N) + tl.store(FINAL_DB + cols, sum_db, mask=cols < N) class LayerNorm(torch.autograd.Function): + @staticmethod - def forward(ctx, a, normalized_shape, weight, bias, eps): + def forward(ctx, x, normalized_shape, weight, bias, eps): # allocate output - out = torch.empty_like(a) + y = torch.empty_like(x) # reshape input data into 2D tensor - a_arg = a.reshape(-1, a.shape[-1]) - M, N = a_arg.shape - mean = torch.empty((M,), dtype=torch.float32, device="cuda") - rstd = torch.empty((M,), dtype=torch.float32, device="cuda") + x_arg = x.reshape(-1, x.shape[-1]) + M, N = x_arg.shape + mean = torch.empty((M, ), dtype=torch.float32, device='cuda') + rstd = torch.empty((M, ), dtype=torch.float32, device='cuda') # Less than 64KB per feature: enqueue fused kernel - MAX_FUSED_SIZE = 65536 // a.element_size() + MAX_FUSED_SIZE = 65536 // x.element_size() BLOCK_SIZE = min(MAX_FUSED_SIZE, triton.next_power_of_2(N)) - BLOCK_SIZE = max(BLOCK_SIZE, 128) - BLOCK_SIZE = min(BLOCK_SIZE, 4096) + if N > BLOCK_SIZE: + raise RuntimeError("This layer norm doesn't support feature dim >= 64KB.") # heuristics for number of warps num_warps = min(max(BLOCK_SIZE // 256, 1), 8) - _layer_norm_fwd_fused[(M,)]( - out, - a_arg, - weight, - bias, - mean, rstd, - a_arg.stride(0), N, eps, - BLOCK_SIZE=BLOCK_SIZE, - num_warps=num_warps, - ) - ctx.save_for_backward( - a, weight, bias, mean, rstd, - ) + # enqueue kernel + _layer_norm_fwd_fused[(M,)](x_arg, y, weight, bias, mean, rstd, + x_arg.stride(0), N, eps, + BLOCK_SIZE=BLOCK_SIZE, num_warps=num_warps) + ctx.save_for_backward(x, weight, bias, mean, rstd) ctx.BLOCK_SIZE = BLOCK_SIZE ctx.num_warps = num_warps ctx.eps = eps - if hasattr(bias, "config"): - assert bias.config.grad_scale_name == weight.config.grad_scale_name - grad_scale_name = bias.config.grad_scale_name - else: - grad_scale_name = None - ctx.grad_scale_gain_bias_name = grad_scale_name - return out + return y @staticmethod - def backward(ctx, dout): - assert dout.is_contiguous() - a, weight, bias, mean, var = ctx.saved_tensors + def backward(ctx, dy): + x, w, b, m, v = ctx.saved_tensors # heuristics for amount of parallel reduction stream for DG/DB - N = weight.shape[0] + N = w.shape[0] + GROUP_SIZE_M = 64 + if N <= 8192: GROUP_SIZE_M = 96 + if N <= 4096: GROUP_SIZE_M = 128 + if N <= 1024: GROUP_SIZE_M = 256 # allocate output - da = torch.empty_like(dout) + locks = torch.zeros(2 * GROUP_SIZE_M, dtype=torch.int32, device='cuda') + _dw = torch.empty((GROUP_SIZE_M, w.shape[0]), dtype=x.dtype, device=w.device) + _db = torch.empty((GROUP_SIZE_M, w.shape[0]), dtype=x.dtype, device=w.device) + dw = torch.empty((w.shape[0],), dtype=w.dtype, device=w.device) + db = torch.empty((w.shape[0],), dtype=w.dtype, device=w.device) + dx = torch.empty_like(dy) # enqueue kernel using forward pass heuristics # also compute partial sums for DW and DB - x_arg = a.reshape(-1, a.shape[-1]) + x_arg = x.reshape(-1, x.shape[-1]) M, N = x_arg.shape - dweight = torch.empty((weight.shape[0],), dtype=weight.dtype, device=weight.device) - dbias = torch.empty((weight.shape[0],), dtype=weight.dtype, device=weight.device) - _layer_norm_bwd_dx_fused[(M,)]( - da, - dout, - a, - weight, - mean, var, - x_arg.stride(0), M, N, - ctx.eps, - BLOCK_SIZE_N=ctx.BLOCK_SIZE, - num_warps=ctx.num_warps, - ) - if N > 10240: - BLOCK_SIZE_N = 128 - BLOCK_SIZE_M = 32 - num_warps = 4 
- else: - # maximize occupancy for small N - BLOCK_SIZE_N = 16 - BLOCK_SIZE_M = 16 - num_warps = 8 - grid = lambda meta: [triton.cdiv(N, meta["BLOCK_SIZE_N"])] - _layer_norm_bwd_dwdb[grid]( - a, dout, - mean, var, - dweight, - dbias, - M, - N, - BLOCK_SIZE_M=BLOCK_SIZE_M, - BLOCK_SIZE_N=BLOCK_SIZE_N, - num_warps=num_warps - ) - return (da, None, dweight, dbias, None) + _layer_norm_bwd_dx_fused[(M,)](dx, dy, _dw, _db, x, w, b, m, v, locks, + x_arg.stride(0), N, ctx.eps, + BLOCK_SIZE_N=ctx.BLOCK_SIZE, + GROUP_SIZE_M=GROUP_SIZE_M, + num_warps=ctx.num_warps) + grid = lambda meta: [triton.cdiv(N, meta['BLOCK_SIZE_N'])] + # accumulate partial sums in separate kernel + _layer_norm_bwd_dwdb[grid](_dw, _db, dw, db, GROUP_SIZE_M, N, + BLOCK_SIZE_M=32, + BLOCK_SIZE_N=128) + return dx, None, dw, db, None -def layer_norm(a, normalized_shape, weight, bias, eps): - return LayerNorm.apply(a, normalized_shape, weight, bias, eps) +layer_norm = LayerNorm.apply def test_layer_norm(M, N, dtype, eps=1e-5, device='cuda'): - torch.manual_seed(0) # create data x_shape = (M, N) w_shape = (x_shape[-1], ) @@ -277,11 +240,11 @@ def test_layer_norm(M, N, dtype, eps=1e-5, device='cuda'): line_names=['Triton', 'Torch'] + (['Apex'] if HAS_APEX else []), styles=[('blue', '-'), ('green', '-'), ('orange', '-')], ylabel='GB/s', - plot_name='layer-norm', - args={'M': 4096, 'dtype': torch.float16, 'mode': 'forward'} + plot_name='layer-norm-backward', + args={'M': 4096, 'dtype': torch.float16, 'mode': 'backward'} ) ) -def bench_layer_norm(M, N, dtype, provider, mode, eps=1e-5, device='cuda'): +def bench_layer_norm(M, N, dtype, provider, mode='backward', eps=1e-5, device='cuda'): # create data x_shape = (M, N) w_shape = (x_shape[-1], ) @@ -311,5 +274,5 @@ def bench_layer_norm(M, N, dtype, provider, mode, eps=1e-5, device='cuda'): return gbps(ms), gbps(max_ms), gbps(min_ms) -# test_layer_norm(1151, 8192, torch.float16) -bench_layer_norm.run(save_path='.', print_data=True) +test_layer_norm(1151, 8192, torch.float16) +# bench_layer_norm.run(save_path='.', print_data=True) diff --git a/python/tutorials/06-fused-attention.py b/python/tutorials/06-fused-attention.py index 996d9df4048a..aef0a463f9ca 100644 --- a/python/tutorials/06-fused-attention.py +++ b/python/tutorials/06-fused-attention.py @@ -15,7 +15,7 @@ @triton.jit def _fwd_kernel( Q, K, V, sm_scale, - TMP, L, M, # NOTE: TMP is a scratchpad buffer to workaround a compiler bug + TMP, L, M, # NOTE: TMP is a scratchpad buffer to work around a compiler bug Out, stride_qz, stride_qh, stride_qm, stride_qk, stride_kz, stride_kh, stride_kn, stride_kk, @@ -39,7 +39,6 @@ def _fwd_kernel( k_ptrs = K + off_k v_ptrs = V + off_v # initialize pointer to m and l - t_ptrs = TMP + off_hz * N_CTX + offs_m m_i = tl.zeros([BLOCK_M], dtype=tl.float32) - float("inf") l_i = tl.zeros([BLOCK_M], dtype=tl.float32) acc = tl.zeros([BLOCK_M, BLOCK_DMODEL], dtype=tl.float32) @@ -47,11 +46,11 @@ def _fwd_kernel( q = tl.load(q_ptrs) # loop over k, v and update accumulator for start_n in range(0, (start_m + 1) * BLOCK_M, BLOCK_N): - start_n = tl.multiple_of(start_n, BLOCK_N) + # start_n = tl.multiple_of(start_n, BLOCK_N) # -- compute qk ---- k = tl.load(k_ptrs + start_n * stride_kn) qk = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - qk += tl.dot(q, k, trans_b=True) + qk += tl.dot(q, tl.trans(k)) qk *= sm_scale qk += tl.where(offs_m[:, None] >= (start_n + offs_n[None, :]), 0, float("-inf")) # -- compute m_ij, p, l_ij @@ -69,8 +68,6 @@ def _fwd_kernel( p = p * p_scale[:, None] # scale acc acc_scale = l_i / 
l_i_new * alpha - tl.store(t_ptrs, acc_scale) - acc_scale = tl.load(t_ptrs) # BUG: have to store and immediately load acc = acc * acc_scale[:, None] # update acc v = tl.load(v_ptrs + start_n * stride_vk) @@ -168,26 +165,26 @@ def _bwd_kernel( q = tl.load(q_ptrs) # recompute p = softmax(qk, dim=-1).T # NOTE: `do` is pre-divided by `l`; no normalization here - qk = tl.dot(q, k, trans_b=True) + qk = tl.dot(q, tl.trans(k)) qk = tl.where(offs_m_curr[:, None] >= (offs_n[None, :]), qk, float("-inf")) m = tl.load(m_ptrs + offs_m_curr) p = tl.exp(qk * sm_scale - m[:, None]) # compute dv do = tl.load(do_ptrs) - dv += tl.dot(p.to(tl.float16), do, trans_a=True) + dv += tl.dot(tl.trans(p.to(tl.float16)), do) # compute dp = dot(v, do) Di = tl.load(D_ptrs + offs_m_curr) dp = tl.zeros([BLOCK_M, BLOCK_N], dtype=tl.float32) - Di[:, None] - dp += tl.dot(do, v, trans_b=True) + dp += tl.dot(do, tl.trans(v)) # compute ds = p * (dp - delta[:, None]) ds = p * dp * sm_scale # compute dk = dot(ds.T, q) - dk += tl.dot(ds.to(tl.float16), q, trans_a=True) - # # compute dq - dq = tl.load(dq_ptrs, eviction_policy="evict_last") + dk += tl.dot(tl.trans(ds.to(tl.float16)), q) + # compute dq + dq = tl.load(dq_ptrs) dq += tl.dot(ds.to(tl.float16), k) - tl.store(dq_ptrs, dq, eviction_policy="evict_last") - # # increment pointers + tl.store(dq_ptrs, dq) + # increment pointers dq_ptrs += BLOCK_M * stride_qm q_ptrs += BLOCK_M * stride_qm do_ptrs += BLOCK_M * stride_qm @@ -198,6 +195,9 @@ def _bwd_kernel( tl.store(dk_ptrs, dk) +empty = torch.empty(128, device="cuda") + + class _attention(torch.autograd.Function): @staticmethod @@ -208,7 +208,7 @@ def forward(ctx, q, k, v, sm_scale): assert Lq == Lk and Lk == Lv assert Lk in {16, 32, 64, 128} o = torch.empty_like(q) - grid = (triton.cdiv(q.shape[2], BLOCK), q.shape[0] * q.shape[1]) + grid = (triton.cdiv(q.shape[2], BLOCK), q.shape[0] * q.shape[1], 1) tmp = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32) L = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32) m = torch.empty((q.shape[0] * q.shape[1], q.shape[2]), device=q.device, dtype=torch.float32) @@ -227,6 +227,7 @@ def forward(ctx, q, k, v, sm_scale): BLOCK_DMODEL=Lk, num_warps=num_warps, num_stages=1, ) + ctx.save_for_backward(q, k, v, o, L, m) ctx.BLOCK = BLOCK ctx.grid = grid @@ -272,13 +273,13 @@ def backward(ctx, do): attention = _attention.apply -@pytest.mark.parametrize('Z, H, N_CTX, D_HEAD', [(3, 2, 2048, 64)]) +@pytest.mark.parametrize('Z, H, N_CTX, D_HEAD', [(4, 48, 1024, 64)]) def test_op(Z, H, N_CTX, D_HEAD, dtype=torch.float16): torch.manual_seed(20) - q = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0, std=.5).requires_grad_() - k = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0, std=.5).requires_grad_() - v = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0, std=.5).requires_grad_() - sm_scale = 0.3 + q = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0.1, std=0.2).requires_grad_() + k = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0.4, std=0.2).requires_grad_() + v = torch.empty((Z, H, N_CTX, D_HEAD), dtype=dtype, device="cuda").normal_(mean=0.3, std=0.2).requires_grad_() + sm_scale = 0.2 dout = torch.randn_like(q) # reference implementation M = torch.tril(torch.ones((N_CTX, N_CTX), device="cuda")) @@ -287,13 +288,16 @@ def test_op(Z, H, N_CTX, D_HEAD, dtype=torch.float16): for h in 
range(H): p[:, :, M == 0] = float("-inf") p = torch.softmax(p.float(), dim=-1).half() + # p = torch.exp(p) ref_out = torch.matmul(p, v) ref_out.backward(dout) ref_dv, v.grad = v.grad.clone(), None ref_dk, k.grad = k.grad.clone(), None ref_dq, q.grad = q.grad.clone(), None - # triton implementation + # # triton implementation tri_out = attention(q, k, v, sm_scale) + # print(ref_out) + # print(tri_out) tri_out.backward(dout) tri_dv, v.grad = v.grad.clone(), None tri_dk, k.grad = k.grad.clone(), None @@ -323,7 +327,7 @@ def test_op(Z, H, N_CTX, D_HEAD, dtype=torch.float16): ylabel='ms', plot_name=f'fused-attention-batch{BATCH}-head{N_HEADS}-d{D_HEAD}-{mode}', args={'H': N_HEADS, 'BATCH': BATCH, 'D_HEAD': D_HEAD, 'dtype': torch.float16, 'mode': mode} -) for mode in ['bwd']] +) for mode in ['fwd']] @triton.testing.perf_report(configs) @@ -356,5 +360,4 @@ def bench_flash_attention(BATCH, H, N_CTX, D_HEAD, mode, provider, dtype=torch.f ms = triton.testing.do_bench(fn, percentiles=None, warmup=warmup, rep=rep) return ms -# only works on A100 at the moment # bench_flash_attention.run(save_path='.', print_data=True) diff --git a/python/tutorials/07-libdevice-function.py b/python/tutorials/07-libdevice-function.py deleted file mode 100644 index 19e6cac7a7d6..000000000000 --- a/python/tutorials/07-libdevice-function.py +++ /dev/null @@ -1,74 +0,0 @@ -""" -Libdevice function -=============== -Triton can invoke a custom function from an external library. -In this example, we will use the `libdevice` library to apply `asin` on a tensor. -Please refer to https://docs.nvidia.com/cuda/libdevice-users-guide/index.html regarding the semantics of all available libdevice functions. - -In `trition/language/libdevice.py`, we try to aggregate functions with the same computation but different data types together. -For example, both `__nv_asin` and `__nvasinf` calculate the principal value of the arc sine of the input, but `__nv_asin` operates on `double` and `__nv_asinf` operates on `float`. -Using triton, you can simply call `tl.libdevice.asin`. -triton automatically selects the correct underlying device function to invoke based on input and output types. -""" - -# %% -# asin Kernel -# -------------------------- - -import torch - -import triton -import triton.language as tl - - -@triton.jit -def asin_kernel( - x_ptr, - y_ptr, - n_elements, - BLOCK_SIZE: tl.constexpr, -): - pid = tl.program_id(axis=0) - block_start = pid * BLOCK_SIZE - offsets = block_start + tl.arange(0, BLOCK_SIZE) - mask = offsets < n_elements - x = tl.load(x_ptr + offsets, mask=mask) - x = tl.libdevice.asin(x) - tl.store(y_ptr + offsets, x, mask=mask) - -# %% -# Using the default libdevice library path -# -------------------------- -# We can use the default libdevice library path encoded in `triton/language/libdevice.py` - - -torch.manual_seed(0) -size = 98432 -x = torch.rand(size, device='cuda') -output_triton = torch.zeros(size, device='cuda') -output_torch = torch.asin(x) -assert x.is_cuda and output_triton.is_cuda -n_elements = output_torch.numel() -grid = lambda meta: (triton.cdiv(n_elements, meta['BLOCK_SIZE']),) -asin_kernel[grid](x, output_triton, n_elements, BLOCK_SIZE=1024) -print(output_torch) -print(output_triton) -print( - f'The maximum difference between torch and triton is ' - f'{torch.max(torch.abs(output_torch - output_triton))}' -) - -# %% -# Customize the libdevice library path -# -------------------------- -# We can also customize the libdevice library path by passing the path to the `libdevice` library to the `asin` kernel. 
- -output_triton = torch.empty_like(x) -asin_kernel[grid](x, output_triton, n_elements, BLOCK_SIZE=1024, - extern_libs={'libdevice': '/usr/local/cuda/nvvm/libdevice/libdevice.10.bc'}) -print(output_torch) -print(output_triton) -print( - f'The maximum difference between torch and triton is ' - f'{torch.max(torch.abs(output_torch - output_triton))}' -) diff --git a/test/Analysis/test-alias.mlir b/test/Analysis/test-alias.mlir new file mode 100644 index 000000000000..6a4407a31c4d --- /dev/null +++ b/test/Analysis/test-alias.mlir @@ -0,0 +1,205 @@ +// RUN: triton-opt %s --mlir-disable-threading -test-print-alias -split-input-file 2>&1 | FileCheck %s + +#AL = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [4, 8], warpsPerCTA = [4, 1], order = [1, 0]}> +#BL = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [1, 32], warpsPerCTA = [4, 1], order = [1, 0]}> +#A_SHARED = #triton_gpu.shared<{vec = 2, perPhase = 2, maxPhase = 4, order = [1, 0]}> +#B_SHARED = #triton_gpu.shared<{vec = 2, perPhase = 2, maxPhase = 4, order = [1, 0]}> +#C = #triton_gpu.mma<{versionMajor = 2, warpsPerCTA = [4, 1]}> +#A_DOT = #triton_gpu.dot_op<{opIdx = 0, parent = #C}> +#B_DOT = #triton_gpu.dot_op<{opIdx = 1, parent = #C}> + +// CHECK-LABEL: matmul_loop +// There shouldn't be any aliasing with the dot op encoding. +func @matmul_loop(%lb : index, %ub : index, %step : index, %A : !tt.ptr, %B : !tt.ptr) { + %a_ptr_init = tt.broadcast %A : (!tt.ptr) -> tensor<128x32x!tt.ptr, #AL> + %b_ptr_init = tt.broadcast %B : (!tt.ptr) -> tensor<32x128x!tt.ptr, #BL> + %a_mask = arith.constant dense : tensor<128x32xi1, #AL> + %a_other = arith.constant dense<0.00e+00> : tensor<128x32xf16, #AL> + %b_mask = arith.constant dense : tensor<32x128xi1, #BL> + %b_other = arith.constant dense<0.00e+00> : tensor<32x128xf16, #BL> + %c_init = arith.constant dense<0.00e+00> : tensor<128x128xf32, #C> + %a_off = arith.constant dense<4> : tensor<128x32xi32, #AL> + %b_off = arith.constant dense<4> : tensor<32x128xi32, #BL> + scf.for %iv = %lb to %ub step %step iter_args(%a_ptr = %a_ptr_init, %b_ptr = %b_ptr_init, %prev_c = %c_init) -> (tensor<128x32x!tt.ptr, #AL>, tensor<32x128x!tt.ptr, #BL>, tensor<128x128xf32, #C>) { + %a_ = tt.load %a_ptr, %a_mask, %a_other {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x32xf16, #AL> + %a = triton_gpu.convert_layout %a_ : (tensor<128x32xf16, #AL>) -> tensor<128x32xf16, #A_DOT> + %b_ = tt.load %b_ptr, %b_mask, %b_other {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<32x128xf16, #BL> + %b = triton_gpu.convert_layout %b_ : (tensor<32x128xf16, #BL>) -> tensor<32x128xf16, #B_DOT> + %c = tt.dot %a, %b, %prev_c {transA = false, transB = false, allowTF32 = true} : tensor<128x32xf16, #A_DOT> * tensor<32x128xf16, #B_DOT> -> tensor<128x128xf32, #C> + + %next_a_ptr = tt.addptr %a_ptr, %a_off : tensor<128x32x!tt.ptr, #AL>, tensor<128x32xi32, #AL> + %next_b_ptr = tt.addptr %b_ptr, %b_off : tensor<32x128x!tt.ptr, #BL>, tensor<32x128xi32, #BL> + scf.yield %next_a_ptr, %next_b_ptr, %c : tensor<128x32x!tt.ptr, #AL>, tensor<32x128x!tt.ptr, #BL>, tensor<128x128xf32, #C> + } + return +} + +// CHECK-LABEL: alloc +func @alloc(%A : !tt.ptr) { + // CHECK: %cst -> %cst + %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + %cst1 = arith.constant dense<0.000000e+00> : tensor<16x32xf16, #AL> + // CHECK: %0 -> %0 + %cst2 = triton_gpu.alloc_tensor : tensor<16x16xf16, #A_SHARED> + return +} + +// CHECK-LABEL: convert +func @convert(%A : !tt.ptr) { + %cst0 = 
arith.constant dense<0.000000e+00> : tensor<16x16xf16, #AL> + // CHECK: %0 -> %0 + %cst1 = triton_gpu.convert_layout %cst0 : (tensor<16x16xf16, #AL>) -> tensor<16x16xf16, #A_SHARED> + return +} + +// CHECK-LABEL: insert_slice_async +func @insert_slice_async(%A : !tt.ptr, %i1 : i1) { + %a_ptr = tt.broadcast %A : (!tt.ptr) -> tensor<16x16x!tt.ptr, #AL> + %mask = tt.splat %i1 : (i1) -> tensor<16x16xi1, #AL> + %other = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #AL> + // CHECK: %cst_0 -> %cst_0 + %tensor = arith.constant dense<0.000000e+00> : tensor<1x16x16xf16, #A_SHARED> + %index = arith.constant 0 : i32 + // CHECK: %2 -> %cst_0 + %a = triton_gpu.insert_slice_async %a_ptr, %tensor, %index, %mask, %other {axis = 0 : i32, cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<16x16x!tt.ptr, #AL> -> tensor<1x16x16xf16, #A_SHARED> + return +} + +// CHECK-LABEL: insert_slice +func @insert_slice(%A : !tt.ptr, %i1 : i1) { + %a_ptr = tt.broadcast %A : (!tt.ptr) -> tensor<16x16x!tt.ptr, #AL> + %mask = tt.splat %i1 : (i1) -> tensor<16x16xi1, #AL> + %other = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #AL> + // CHECK: %cst_0 -> %cst_0 + %tensor = arith.constant dense<0.000000e+00> : tensor<1x16x16xf16, #A_SHARED> + %index = arith.constant 0 : index + %a = tt.load %a_ptr, %mask, %other {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<16x16xf16, #AL> + // CHECK: %3 -> %cst_0 + %b = tensor.insert_slice %a into %tensor[%index, 0, 0][1, 16, 16][1, 1, 1]: tensor<16x16xf16, #AL> into tensor<1x16x16xf16, #A_SHARED> + return +} + +// CHECK-LABEL: extract_slice +func @extract_slice(%A : !tt.ptr) { + // CHECK: %cst -> %cst + %cst0 = arith.constant dense<0.000000e+00> : tensor<1x16x16xf16, #A_SHARED> + %index = arith.constant 0 : index + // CHECK-NEXT: %0 -> %cst + %cst1 = tensor.extract_slice %cst0[%index, 0, 0][1, 16, 16][1, 1, 1] : tensor<1x16x16xf16, #A_SHARED> to tensor<16x16xf16, #A_SHARED> + return +} + +// CHECK-LABEL: if_cat +func @if_cat(%i1 : i1) { + // CHECK: %cst -> %cst + %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + // CHECK: %cst_0 -> %cst_0 + %cst1 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + // CHECK: %0 -> %1,%1 + %cst2 = scf.if %i1 -> tensor<32x16xf16, #A_SHARED> { + // CHECK: %1 -> %1 + %a = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + scf.yield %a : tensor<32x16xf16, #A_SHARED> + } else { + // CHECK: %1 -> %1 + %b = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + scf.yield %b : tensor<32x16xf16, #A_SHARED> + } + return +} + +// CHECK-LABEL: if_alias +func @if_alias(%i1 : i1) { + // CHECK: %cst -> %cst + %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + // CHECK-NEXT: %cst_0 -> %cst_0 + %cst1 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + // CHECK-NEXT: %0 -> %cst,%cst_0 + %cst2 = scf.if %i1 -> tensor<16x16xf16, #A_SHARED> { + scf.yield %cst0 : tensor<16x16xf16, #A_SHARED> + } else { + scf.yield %cst1 : tensor<16x16xf16, #A_SHARED> + } + return +} + +// CHECK-LABEL: for +func @for(%lb : index, %ub : index, %step : index, %A : !tt.ptr, %B : !tt.ptr) { + // CHECK: %cst -> %cst + %a_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + // CHECK-NEXT: %cst_0 -> %cst_0 + %b_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + // CHECK-NEXT: 
%cst_1 -> %cst_1 + %c_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + // CHECK-NEXT: %arg6 -> %cst + // CHECK-NEXT: %arg7 -> %cst_0 + // CHECK-NEXT: %arg8 -> %cst_1 + // CHECK-NEXT: %0#0 -> %cst,%cst_0 + // CHECK-NEXT: %0#1 -> %cst,%cst_0 + // CHECK-NEXT: %0#2 -> %cst,%cst_0 + %a_shared, %b_shared, %c_shared = scf.for %iv = %lb to %ub step %step iter_args(%a_shared = %a_shared_init, %b_shared = %b_shared_init, %c_shared = %c_shared_init) -> (tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>) { + scf.yield %b_shared, %a_shared, %a_shared : tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED> + } + return +} + +// CHECK-LABEL: for_if +func @for_if(%lb : index, %ub : index, %step : index, %A : !tt.ptr, %B : !tt.ptr, %i1 : i1) { + // CHECK: %cst -> %cst + %a_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + // CHECK-NEXT: %cst_0 -> %cst_0 + %b_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + // CHECK-NEXT: %cst_1 -> %cst_1 + %c_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + // CHECK-NEXT: %arg7 -> %cst + // CHECK-NEXT: %arg8 -> %cst_0 + // CHECK-NEXT: %arg9 -> %cst_1 + // CHECK-NEXT: %0#0 -> %cst,%cst_0 + // CHECK-NEXT: %0#1 -> %cst,%cst_0 + // CHECK-NEXT: %0#2 -> %cst,%cst_0 + %a_shared, %b_shared, %c_shared = scf.for %iv = %lb to %ub step %step iter_args(%a_shared = %a_shared_init, %b_shared = %b_shared_init, %c_shared = %c_shared_init) -> (tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>) { + scf.if %i1 { + %index = arith.constant 8 : index + // CHECK-NEXT: %1 -> %cst,%cst_0 + %cst0 = tensor.extract_slice %a_shared[%index, 0][1, 32][1, 1] : tensor<128x32xf16, #A_SHARED> to tensor<32xf16, #A_SHARED> + scf.yield + } + scf.yield %b_shared, %a_shared, %a_shared : tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED> + } + return +} + +// CHECK-LABEL: for_if_for +func @for_if_for(%lb : index, %ub : index, %step : index, %A : !tt.ptr, %B : !tt.ptr, %i1 : i1) { + // CHECK: %cst -> %cst + %a_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + // CHECK-NEXT: %cst_0 -> %cst_0 + %b_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + // CHECK-NEXT: %cst_1 -> %cst_1 + %c_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + // CHECK-NEXT: %arg7 -> %cst + // CHECK-NEXT: %arg8 -> %cst_0 + // CHECK-NEXT: %arg9 -> %cst_1 + // CHECK-NEXT: %0#0 -> %cst + // CHECK-NEXT: %0#1 -> %cst_0 + // CHECK-NEXT: %0#2 -> %cst_2,%cst_2 + %a_shared, %b_shared, %c_shared = scf.for %iv = %lb to %ub step %step iter_args(%a_shared = %a_shared_init, %b_shared = %b_shared_init, %c_shared = %c_shared_init) -> (tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>) { + // CHECK-NEXT: %arg11 -> %cst_1,%cst_2,%cst_2 + // CHECK-NEXT: %1 -> %cst_2,%cst_2 + %c_shared_next = scf.for %jv = %lb to %ub step %step iter_args(%c_shared_next = %c_shared) -> (tensor<128x32xf16, #A_SHARED>) { + // CHECK-NEXT: %2 -> %cst_2,%cst_2 + %c_shared_next_next = scf.if %i1 -> tensor<128x32xf16, #A_SHARED> { + // CHECK-NEXT: %cst_2 -> %cst_2 + %cst0 = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + scf.yield %cst0 : tensor<128x32xf16, #A_SHARED> + } else { + // CHECK-NEXT: %cst_2 -> %cst_2 + %cst0 = arith.constant dense<0.00e+00> : 
tensor<128x32xf16, #A_SHARED> + scf.yield %cst0 : tensor<128x32xf16, #A_SHARED> + } + scf.yield %c_shared_next_next : tensor<128x32xf16, #A_SHARED> + } + scf.yield %a_shared, %b_shared, %c_shared_next : tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED> + } + return +} diff --git a/test/Analysis/test-alignment.mlir b/test/Analysis/test-alignment.mlir new file mode 100644 index 000000000000..312194f21a9a --- /dev/null +++ b/test/Analysis/test-alignment.mlir @@ -0,0 +1,141 @@ +// RUN: triton-opt %s -test-print-alignment -split-input-file 2>&1 | FileCheck %s + +func @permute_2d(%arg0: !tt.ptr {tt.divisibility = 16 : i32}, %arg1: i32 {tt.divisibility = 16 : i32}, %arg2: !tt.ptr {tt.divisibility = 16 : i32}, %arg3: i32 {tt.divisibility = 16 : i32}) { + // CHECK: Contiguity: [1, 1] ; Divisibility: [1, 1] ; Constancy: [1, 1] + %cst = arith.constant dense : tensor<128x128xi1> + // CHECK-NEXT: Contiguity: [1, 1] ; Divisibility: [1, 1] ; Constancy: [1, 1] + %cst_0 = arith.constant dense<0.000000e+00> : tensor<128x128xf32> + // CHECK-NEXT: Contiguity: [128] ; Divisibility: [65536] ; Constancy: [1] + %0 = tt.make_range {end = 128 : i32, start = 0 : i32} : tensor<128xi32> + // CHECK-NEXT: Contiguity: [128] ; Divisibility: [65536] ; Constancy: [1] + %1 = tt.make_range {end = 128 : i32, start = 0 : i32} : tensor<128xi32> + // CHECK-NEXT: Contiguity: [128, 1] ; Divisibility: [65536, 1] ; Constancy: [1, 1] + %2 = tt.expand_dims %0 {axis = 1 : i32} : (tensor<128xi32>) -> tensor<128x1xi32> + // CHECK-NEXT: Contiguity: [1, 1] ; Divisibility: [16, 16] ; Constancy: [128, 1] + %3 = tt.splat %arg1 : (i32) -> tensor<128x1xi32> + // CHECK-NEXT: Contiguity: [1, 1] ; Divisibility: [1048576, 16] ; Constancy: [1, 1] + %4 = arith.muli %2, %3 : tensor<128x1xi32> + // CHECK-NEXT: Contiguity: [1, 1] ; Divisibility: [16, 16] ; Constancy: [128, 1] + %5 = tt.splat %arg0 : (!tt.ptr) -> tensor<128x1x!tt.ptr> + // CHECK-NEXT: Contiguity: [1, 1] ; Divisibility: [16, 16] ; Constancy: [1, 1] + %6 = tt.addptr %5, %4 : tensor<128x1x!tt.ptr>, tensor<128x1xi32> + // CHECK-NEXT: Contiguity: [1, 128] ; Divisibility: [1, 65536] ; Constancy: [1, 1] + %7 = tt.expand_dims %1 {axis = 0 : i32}: (tensor<128xi32>) -> tensor<1x128xi32> + // CHECK-NEXT: Contiguity: [1, 1] ; Divisibility: [16, 16] ; Constancy: [1, 128] + %8 = tt.broadcast %6 : (tensor<128x1x!tt.ptr>) -> tensor<128x128x!tt.ptr> + // CHECK-NEXT: Contiguity: [1, 128] ; Divisibility: [1, 65536] ; Constancy: [128, 1] + %9 = tt.broadcast %7 : (tensor<1x128xi32>) -> tensor<128x128xi32> + // CHECK-NEXT: Contiguity: [1, 128] ; Divisibility: [1, 16] ; Constancy: [1, 1] + %10 = tt.addptr %8, %9 : tensor<128x128x!tt.ptr>, tensor<128x128xi32> + // CHECK-NEXT: Contiguity: [128, 1] ; Divisibility: [65536, 1] ; Constancy: [1, 1] + %11 = tt.expand_dims %0 {axis = 1 : i32}: (tensor<128xi32>) -> tensor<128x1xi32> + // CHECK-NEXT: Contiguity: [1, 1] ; Divisibility: [16, 16] ; Constancy: [128, 1] + %12 = tt.splat %arg2 : (!tt.ptr) -> tensor<128x1x!tt.ptr> + // CHECK-NEXT: Contiguity: [128, 1] ; Divisibility: [16, 1] ; Constancy: [1, 1] + %13 = tt.addptr %12, %11 : tensor<128x1x!tt.ptr>, tensor<128x1xi32> + // CHECK-NEXT: Contiguity: [1, 128] ; Divisibility: [1, 65536] ; Constancy: [1, 1] + %14 = tt.expand_dims %1 {axis = 0 : i32} : (tensor<128xi32>) -> tensor<1x128xi32> + // CHECK-NEXT: Contiguity: [1, 1] ; Divisibility: [16, 16] ; Constancy: [1, 128] + %15 = tt.splat %arg3 : (i32) -> tensor<1x128xi32> + // CHECK-NEXT: Contiguity: [1, 1] ; Divisibility: [16, 
1048576] ; Constancy: [1, 1] + %16 = arith.muli %14, %15 : tensor<1x128xi32> + // CHECK-NEXT: Contiguity: [128, 1] ; Divisibility: [16, 1] ; Constancy: [1, 128] + %17 = tt.broadcast %13 : (tensor<128x1x!tt.ptr>) -> tensor<128x128x!tt.ptr> + // CHECK-NEXT: Contiguity: [1, 1] ; Divisibility: [16, 1048576] ; Constancy: [128, 1] + %18 = tt.broadcast %16 : (tensor<1x128xi32>) -> tensor<128x128xi32> + // CHECK-NEXT: Contiguity: [128, 1] ; Divisibility: [16, 1] ; Constancy: [1, 1] + %19 = tt.addptr %17, %18 : tensor<128x128x!tt.ptr>, tensor<128x128xi32> + // CHECK-NEXT: Contiguity: [1, 1] ; Divisibility: [1, 1] ; Constancy: [1, 1] + %20 = tt.load %10, %cst, %cst_0 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x128xf32> + tt.store %19, %20, %cst : tensor<128x128xf32> + return +} + +// ----- + +module { + +// This is a tiny test for verifying StoreOp-related alignment, It simply store a constant to a buffer. +func @store_constant_align(%addr: !tt.ptr {tt.divisibility = 16 : i32}, %n: i32 {tt.divisibility = 16 : i32}) { + // CHECK: Contiguity: [1] ; Divisibility: [1] ; Constancy: [1] + %pid = tt.get_program_id {axis = 0 : i32} : i32 + // CHECK-NEXT: Contiguity: [1] ; Divisibility: [128] ; Constancy: [1] + %c128_i32 = arith.constant 128 : i32 + // CHECK-NEXT: Contiguity: [1] ; Divisibility: [128] ; Constancy: [1] + %1 = arith.muli %pid, %c128_i32 : i32 + // CHECK-NEXT: Contiguity: [128] ; Divisibility: [65536] ; Constancy: [1] + %2 = tt.make_range {end = 128 : i32, start = 0 : i32} : tensor<128xi32> + // CHECK-NEXT: Contiguity: [1] ; Divisibility: [128] ; Constancy: [128] + %3 = tt.splat %1 : (i32) -> tensor<128xi32> + // CHECK-NEXT: Contiguity: [128] ; Divisibility: [128] ; Constancy: [1] + %4 = arith.addi %3, %2 : tensor<128xi32> + // CHECK-NEXT: Contiguity: [1] ; Divisibility: [16] ; Constancy: [128] + %5 = tt.splat %addr : (!tt.ptr) -> tensor<128x!tt.ptr> + // CHECK-NEXT: Contiguity: [128] ; Divisibility: [16] ; Constancy: [1] + %6 = tt.addptr %5, %4 : tensor<128x!tt.ptr>, tensor<128xi32> + // CHECK-NEXT: Contiguity: [1] ; Divisibility: [16] ; Constancy: [128] + %9 = tt.splat %n : (i32) -> tensor<128xi32> + // CHECK-NEXT: Contiguity: [1] ; Divisibility: [128] ; Constancy: [16] + %mask = arith.cmpi slt, %4, %9 : tensor<128xi32> + // CHECK-NEXT: Contiguity: [1] ; Divisibility: [1] ; Constancy: [1] + %cst = arith.constant dense<0.0> : tensor<128xf32> + tt.store %5, %cst, %mask : tensor<128xf32> + return +} + +} + +// ----- + +// This IR is dumped from vecadd test. +// Note, the hint {tt.divisibility = 16 : i32} for %n_elements affects the alignment of mask. 
+func @vecadd_mask_align_16(%arg0: !tt.ptr {tt.divisibility = 16 : i32}, %arg1: !tt.ptr {tt.divisibility = 16 : i32}, %arg2: !tt.ptr {tt.divisibility = 16 : i32}, %n_elements: i32 {tt.divisibility = 16 : i32}) { + %c64_i32 = arith.constant 64 : i32 + %0 = tt.get_program_id {axis = 0 : i32} : i32 + %1 = arith.muli %0, %c64_i32 : i32 + %2 = tt.make_range {end = 64 : i32, start = 0 : i32} : tensor<64xi32> + %3 = tt.splat %1 : (i32) -> tensor<64xi32> + %4 = arith.addi %3, %2 : tensor<64xi32> + %5 = tt.splat %arg0 : (!tt.ptr) -> tensor<64x!tt.ptr> + %6 = tt.addptr %5, %4 : tensor<64x!tt.ptr>, tensor<64xi32> + %7 = tt.splat %arg1 : (!tt.ptr) -> tensor<64x!tt.ptr> + %8 = tt.addptr %7, %4 : tensor<64x!tt.ptr>, tensor<64xi32> + %9 = tt.splat %n_elements : (i32) -> tensor<64xi32> + // CHECK: Contiguity: [1] ; Divisibility: [64] ; Constancy: [16] ( %{{.*}} = arith.cmpi slt, %{{.*}}, %{{.*}} : tensor<64xi32> ) + %mask = arith.cmpi slt, %4, %9 : tensor<64xi32> + %11 = tt.load %6, %mask {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<64xf32> + %12 = tt.load %8, %mask {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<64xf32> + %13 = arith.addf %11, %12 : tensor<64xf32> + %14 = tt.splat %arg2 : (!tt.ptr) -> tensor<64x!tt.ptr> + // CHECK: Contiguity: [64] ; Divisibility: [16] ; Constancy: [1] ( %{{.*}} = tt.addptr %{{.*}}, %{{.*}} : tensor<64x!tt.ptr>, tensor<64xi32> ) + %15 = tt.addptr %14, %4 : tensor<64x!tt.ptr>, tensor<64xi32> + tt.store %15, %13, %mask : tensor<64xf32> + return +} + +// ----- + +// This IR is dumped from vecadd test. +// Note, there is no divisibility hint for %n_elements, Triton should assume its divisibility to be 1 by default. +func @vecadd_mask_align_1(%arg0: !tt.ptr {tt.divisibility = 16 : i32}, %arg1: !tt.ptr {tt.divisibility = 16 : i32}, %arg2: !tt.ptr {tt.divisibility = 16 : i32}, %n_elements: i32) { + %c64_i32 = arith.constant 64 : i32 + %0 = tt.get_program_id {axis = 0 : i32} : i32 + %1 = arith.muli %0, %c64_i32 : i32 + %2 = tt.make_range {end = 64 : i32, start = 0 : i32} : tensor<64xi32> + %3 = tt.splat %1 : (i32) -> tensor<64xi32> + %4 = arith.addi %3, %2 : tensor<64xi32> + %5 = tt.splat %arg0 : (!tt.ptr) -> tensor<64x!tt.ptr> + %6 = tt.addptr %5, %4 : tensor<64x!tt.ptr>, tensor<64xi32> + %7 = tt.splat %arg1 : (!tt.ptr) -> tensor<64x!tt.ptr> + %8 = tt.addptr %7, %4 : tensor<64x!tt.ptr>, tensor<64xi32> + %9 = tt.splat %n_elements : (i32) -> tensor<64xi32> + // CHECK: Contiguity: [1] ; Divisibility: [64] ; Constancy: [1] ( %{{.*}} = arith.cmpi slt, %{{.*}}, %{{.*}} : tensor<64xi32> ) + %10 = arith.cmpi slt, %4, %9 : tensor<64xi32> + %11 = tt.load %6, %10 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<64xf32> + %12 = tt.load %8, %10 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<64xf32> + %13 = arith.addf %11, %12 : tensor<64xf32> + %14 = tt.splat %arg2 : (!tt.ptr) -> tensor<64x!tt.ptr> + %15 = tt.addptr %14, %4 : tensor<64x!tt.ptr>, tensor<64xi32> + tt.store %15, %13, %10 : tensor<64xf32> + return +} diff --git a/test/Analysis/test-allocation.mlir b/test/Analysis/test-allocation.mlir new file mode 100644 index 000000000000..888a30dad28c --- /dev/null +++ b/test/Analysis/test-allocation.mlir @@ -0,0 +1,319 @@ +// RUN: triton-opt %s -split-input-file --mlir-disable-threading -test-print-allocation 2>&1 | FileCheck %s + +#AL = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [4, 8], warpsPerCTA = [4, 1], order = [1, 0]}> +#sliceAd0 = #triton_gpu.slice<{dim = 0, parent = #AL}> +#BL = 
#triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [1, 32], warpsPerCTA = [4, 1], order = [1, 0]}> +#A_SHARED = #triton_gpu.shared<{vec = 2, perPhase = 2, maxPhase = 4, order = [1, 0]}> +#B_SHARED = #triton_gpu.shared<{vec = 2, perPhase = 2, maxPhase = 4, order = [1, 0]}> +#C = #triton_gpu.mma<{versionMajor = 2, warpsPerCTA = [4, 1]}> +#A_DOT = #triton_gpu.dot_op<{opIdx = 0, parent = #C}> +#B_DOT = #triton_gpu.dot_op<{opIdx = 1, parent = #C}> + +module attributes {"triton_gpu.num-warps" = 4 : i32} { + +// CHECK-LABEL: matmul_loop +func @matmul_loop(%lb : index, %ub : index, %step : index, %A : !tt.ptr, %B : !tt.ptr) { + %a_ptr_init = tt.broadcast %A : (!tt.ptr) -> tensor<128x32x!tt.ptr, #AL> + %b_ptr_init = tt.broadcast %B : (!tt.ptr) -> tensor<32x128x!tt.ptr, #BL> + + %a_mask = arith.constant dense : tensor<128x32xi1, #AL> + %a_other = arith.constant dense<0.00e+00> : tensor<128x32xf16, #AL> + %b_mask = arith.constant dense : tensor<32x128xi1, #BL> + %b_other = arith.constant dense<0.00e+00> : tensor<32x128xf16, #BL> + %c_init = arith.constant dense<0.00e+00> : tensor<128x128xf32, #C> + + %a_off = arith.constant dense<4> : tensor<128x32xi32, #AL> + %b_off = arith.constant dense<4> : tensor<32x128xi32, #BL> + + scf.for %iv = %lb to %ub step %step iter_args(%a_ptr = %a_ptr_init, %b_ptr = %b_ptr_init, %prev_c = %c_init) -> (tensor<128x32x!tt.ptr, #AL>, tensor<32x128x!tt.ptr, #BL>, tensor<128x128xf32, #C>) { + %a_ = tt.load %a_ptr, %a_mask, %a_other {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x32xf16, #AL> + // CHECK: offset = 0, size = 4608 + %a = triton_gpu.convert_layout %a_ : (tensor<128x32xf16, #AL>) -> tensor<128x32xf16, #A_DOT> + %b_ = tt.load %b_ptr, %b_mask, %b_other {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<32x128xf16, #BL> + // CHECK-NEXT: offset = 0, size = 4224 + %b = triton_gpu.convert_layout %b_ : (tensor<32x128xf16, #BL>) -> tensor<32x128xf16, #B_DOT> + + %c = tt.dot %a, %b, %prev_c {allowTF32 = true, transA = false, transB = false} : tensor<128x32xf16, #A_DOT> * tensor<32x128xf16, #B_DOT> -> tensor<128x128xf32, #C> + + %next_a_ptr = tt.addptr %a_ptr, %a_off : tensor<128x32x!tt.ptr, #AL>, tensor<128x32xi32, #AL> + %next_b_ptr = tt.addptr %b_ptr, %b_off : tensor<32x128x!tt.ptr, #BL>, tensor<32x128xi32, #BL> + scf.yield %next_a_ptr, %next_b_ptr, %c : tensor<128x32x!tt.ptr, #AL>, tensor<32x128x!tt.ptr, #BL>, tensor<128x128xf32, #C> + } + return + // CHECK-NEXT: size = 4608 +} + +// Shared memory is available after a tensor's liveness range ends +// CHECK-LABEL: reusable +func @reusable(%A : !tt.ptr) { + %cst1 = arith.constant dense : tensor<128x32xi1, #AL> + %cst2 = arith.constant dense<0.000000e+00> : tensor<128x32xf16, #AL> + %cst3 = arith.constant dense : tensor<32x128xi1, #AL> + %cst4 = arith.constant dense<0.000000e+00> : tensor<32x128xf16, #AL> + %c_init = arith.constant dense<0.00e+00> : tensor<128x128xf32, #C> + + %a_ptr = tt.broadcast %A : (!tt.ptr) -> tensor<128x32x!tt.ptr, #AL> + %b_ptr = tt.broadcast %A : (!tt.ptr) -> tensor<32x128x!tt.ptr, #AL> + %a1_ = tt.load %a_ptr, %cst1, %cst2 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x32xf16, #AL> + // CHECK-NEXT: offset = 0, size = 4608 + %a1 = triton_gpu.convert_layout %a1_ : (tensor<128x32xf16, #AL>) -> tensor<128x32xf16, #A_DOT> + %a2_ = tt.load %b_ptr, %cst3, %cst4 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<32x128xf16, #AL> + // CHECK-NEXT: offset = 0, size = 1152 + %a2 = triton_gpu.convert_layout %a2_ : 
(tensor<32x128xf16, #AL>) -> tensor<32x128xf16, #B_DOT> + %a3_ = tt.load %a_ptr, %cst1, %cst2 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x32xf16, #AL> + // CHECK-NEXT: offset = 0, size = 4608 + %a3 = triton_gpu.convert_layout %a3_ : (tensor<128x32xf16, #AL>) -> tensor<128x32xf16, #A_DOT> + %c = tt.dot %a1, %a2, %c_init {allowTF32 = true, transA = false, transB = false} : tensor<128x32xf16, #A_DOT> * tensor<32x128xf16, #B_DOT> -> tensor<128x128xf32, #C> + %a4_ = tt.load %b_ptr, %cst3, %cst4 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<32x128xf16, #AL> + // CHECK-NEXT: offset = 0, size = 1152 + %a4 = triton_gpu.convert_layout %a4_ : (tensor<32x128xf16, #AL>) -> tensor<32x128xf16, #B_DOT> + %c1 = tt.dot %a3, %a4, %c {allowTF32 = true, transA = false, transB = false} : tensor<128x32xf16, #A_DOT> * tensor<32x128xf16, #B_DOT> -> tensor<128x128xf32, #C> + return + // CHECK-NEXT: size = 4608 +} + +// A tensor's shared memory offset is larger than it needs to accommodate further tensors +// %cst0->%c +// %cst1->%cst4 +// %cst3->%g->%h->%i +// CHECK-LABEL: preallocate +func @preallocate(%A : !tt.ptr) { + // CHECK: offset = 0, size = 512 + %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 1024, size = 512 + %cst1 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 1536, size = 512 + %cst2 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 2048, size = 1024 + %a = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 3072, size = 1024 + %b = tt.cat %cst0, %cst2 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 0, size = 1024 + %c = tt.cat %cst1, %cst2 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 1024, size = 1024 + %cst4 = arith.constant dense<0.000000e+00> : tensor<32x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 6144, size = 2048 + %e = tt.cat %a, %cst4 {axis = 0} : (tensor<32x16xf16, #A_SHARED>, tensor<32x16xf16, #A_SHARED>) -> tensor<64x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 8192, size = 2048 + %d = tt.cat %b, %cst4 {axis = 0} : (tensor<32x16xf16, #A_SHARED>, tensor<32x16xf16, #A_SHARED>) -> tensor<64x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 10240, size = 2048 + %f = tt.cat %c, %cst4 {axis = 0} : (tensor<32x16xf16, #A_SHARED>, tensor<32x16xf16, #A_SHARED>) -> tensor<64x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 0, size = 2048 + %cst5 = arith.constant dense<0.000000e+00> : tensor<64x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 2048, size = 4096 + %g = tt.cat %e, %cst5 {axis = 0} : (tensor<64x16xf16, #A_SHARED>, tensor<64x16xf16, #A_SHARED>) -> tensor<128x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 2048, size = 4096 + %h = tt.cat %d, %cst5 {axis = 0} : (tensor<64x16xf16, #A_SHARED>, tensor<64x16xf16, #A_SHARED>) -> tensor<128x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 2048, size = 4096 + %i = tt.cat %f, %cst5 {axis = 0} : (tensor<64x16xf16, #A_SHARED>, tensor<64x16xf16, #A_SHARED>) -> tensor<128x16xf16, #A_SHARED> + return + // CHECK-NEXT: size = 12288 +} + +// Unused tensors are immediately released +// CHECK-LABEL: unused +func @unused(%A : !tt.ptr) { + // CHECK: offset = 0, size = 1024 + %cst0 = arith.constant dense<0.000000e+00> : 
tensor<32x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 0, size = 512 + %cst1 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 512, size = 512 + %cst2 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 1024, size = 1024 + %a = tt.cat %cst1, %cst2 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + return + // CHECK: size = 2048 +} + +// cst0 is alive through the entire function, it cannot be released before the end of the function +// CHECK-LABEL: longlive +func @longlive(%A : !tt.ptr) { + // CHECK: offset = 0, size = 512 + %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 512, size = 512 + %cst1 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 1024, size = 512 + %cst2 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 1536, size = 1024 + %a = tt.cat %cst1, %cst2 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 512, size = 512 + %cst3 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 1024, size = 512 + %cst4 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 1536, size = 1024 + %b = tt.cat %cst3, %cst4 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 1536, size = 512 + %cst5 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 1536, size = 512 + %cst6 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 1536, size = 1024 + %c = tt.cat %cst3, %cst4 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 512, size = 1024 + %d = tt.cat %cst0, %cst0 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + return + // CHECK-NEXT: size = 2560 +} + +// CHECK-LABEL: alloc +func @alloc(%A : !tt.ptr) { + // CHECK: offset = 0, size = 512 + %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + %cst1 = arith.constant dense<0.000000e+00> : tensor<16x32xf16, #AL> + // CHECK-NEXT: offset = 0, size = 512 + %cst2 = triton_gpu.alloc_tensor : tensor<16x16xf16, #A_SHARED> + return + // CHECK-NEXT: size = 512 +} + +// CHECK-LABEL: scratch +func @scratch() { + %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #AL> + // CHECK: scratch offset = 0, size = 512 + %b = tt.reduce %cst0 {redOp = 1 : i32, axis = 0 : i32} : tensor<16x16xf16, #AL> -> tensor<16xf16, #sliceAd0> + return + // CHECK-NEXT: size = 512 +} + +// CHECK-LABEL: insert_slice_async +func @insert_slice_async(%A : !tt.ptr, %i1 : i1) { + %a_ptr = tt.broadcast %A : (!tt.ptr) -> tensor<16x16x!tt.ptr, #AL> + %mask = tt.splat %i1 : (i1) -> tensor<16x16xi1, #AL> + %other = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #AL> + // CHECK: offset = 0, size = 512 + %tensor = arith.constant dense<0.000000e+00> : tensor<1x16x16xf16, #A_SHARED> + %index = arith.constant 0 : i32 + %a = triton_gpu.insert_slice_async %a_ptr, %tensor, %index, %mask, %other {axis = 0 : i32, cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<16x16x!tt.ptr, #AL> -> tensor<1x16x16xf16, #A_SHARED> + return + // 
CHECK-NEXT: size = 512 +} + +// CHECK-LABEL: extract_slice +func @extract_slice(%A : !tt.ptr) { + // CHECK: offset = 0, size = 512 + %cst0 = arith.constant dense<0.000000e+00> : tensor<1x16x16xf16, #A_SHARED> + %index = arith.constant 0 : index + %cst1 = tensor.extract_slice %cst0[%index, 0, 0][1, 16, 16][1,1,1] : tensor<1x16x16xf16, #A_SHARED> to tensor<16x16xf16, #A_SHARED> + return + // CHECK-NEXT: size = 512 +} + +// B0 -> (B1) -> B0 +// Memory used by B1 can be reused by B0. +// CHECK-LABEL: if +func @if(%i1 : i1) { + // CHECK: offset = 0, size = 512 + %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 512, size = 512 + %cst1 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + scf.if %i1 { + // CHECK-NEXT: offset = 1024, size = 1024 + %a = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 1024, size = 1024 + %b = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + } + // CHECK-NEXT: offset = 0, size = 512 + %cst2 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 512, size = 512 + %cst3 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 1024, size = 1024 + %a = tt.cat %cst2, %cst3 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + return + // CHECK-NEXT: size = 2048 +} + +// B0 -> (B1) -> (B2) -> B0 +// Memory used by B0 cannot be reused by B1 or B2. +// CHECK-LABEL: if_else +func @if_else(%i1 : i1) { + // CHECK: offset = 0, size = 512 + %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 512, size = 512 + %cst1 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + scf.if %i1 { + // CHECK-NEXT: offset = 1024, size = 1024 + %a = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 1024, size = 1024 + %b = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + } else { + // CHECK-NEXT: offset = 1024, size = 512 + %cst2 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 1536, size = 512 + %cst3 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + // CHECK-NEXT: offset = 2048, size = 1024 + %a = tt.cat %cst2, %cst3 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + } + // CHECK-NEXT: offset = 1024, size = 1024 + %a = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + return + // CHECK-NEXT: size = 3072 +} + +// Block arguments and yields are memory aliases that do not trigger a new +// allocation. 
+// CHECK-LABEL: for +func @for(%lb : index, %ub : index, %step : index, %A : !tt.ptr, %B : !tt.ptr) { + // CHECK: offset = 0, size = 8192 + %a_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + // CHECK-NEXT: offset = 8192, size = 8192 + %b_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + // CHECK-NEXT: offset = 16384, size = 8192 + %c_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + %a_shared, %b_shared, %c_shared = scf.for %iv = %lb to %ub step %step iter_args(%a_shared = %a_shared_init, %b_shared = %b_shared_init, %c_shared = %c_shared_init) -> (tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>) { + scf.yield %b_shared, %a_shared, %a_shared : tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED> + } + return + // CHECK-NEXT: size = 24576 +} + +// CHECK-LABEL: for_if_slice +func @for_if_slice(%lb : index, %ub : index, %step : index, %A : !tt.ptr, %B : !tt.ptr, %i1 : i1) { + // CHECK: offset = 0, size = 8192 + %a_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + // CHECK-NEXT: offset = 8192, size = 8192 + %b_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + // CHECK-NEXT: offset = 16384, size = 8192 + %c_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + %a_shared, %b_shared, %c_shared = scf.for %iv = %lb to %ub step %step iter_args(%a_shared = %a_shared_init, %b_shared = %b_shared_init, %c_shared = %c_shared_init) -> (tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>) { + scf.if %i1 { + %index = arith.constant 8 : index + %cst0 = tensor.extract_slice %a_shared[%index, 0][1, 32][1, 1] : tensor<128x32xf16, #A_SHARED> to tensor<32xf16, #A_SHARED> + scf.yield + } + scf.yield %b_shared, %a_shared, %a_shared : tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED> + } + return + // CHECK-NEXT: size = 24576 +} + +// a_shared_init, b_shared_init, and c_shared_init's liveness ranges span the entire function before cst2. +// So they cannot be reused by cst0 and cst1, but can be reused by cst2.
+// CHECK-LABEL: for_if_for +func @for_if_for(%lb : index, %ub : index, %step : index, %A : !tt.ptr, %B : !tt.ptr, %i1 : i1) { + // CHECK: offset = 0, size = 8192 + %a_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + // CHECK-NEXT: offset = 8192, size = 8192 + %b_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + // CHECK-NEXT: offset = 16384, size = 8192 + %c_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + %a_shared, %b_shared, %c_shared = scf.for %iv = %lb to %ub step %step iter_args(%a_shared = %a_shared_init, %b_shared = %b_shared_init, %c_shared = %c_shared_init) -> (tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>) { + %c_shared_next = scf.for %jv = %lb to %ub step %step iter_args(%c_shared_next = %c_shared) -> (tensor<128x32xf16, #A_SHARED>) { + %c_shared_next_next = scf.if %i1 -> tensor<128x32xf16, #A_SHARED> { + // CHECK-NEXT: offset = 24576, size = 8192 + %cst0 = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + scf.yield %cst0 : tensor<128x32xf16, #A_SHARED> + } else { + // CHECK-NEXT: offset = 32768, size = 8192 + %cst1 = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + scf.yield %cst1 : tensor<128x32xf16, #A_SHARED> + } + scf.yield %c_shared_next_next : tensor<128x32xf16, #A_SHARED> + } + scf.yield %a_shared, %b_shared, %c_shared_next : tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED> + } + // CHECK-NEXT: offset = 0, size = 8192 + %cst2 = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + return + // CHECK-NEXT: size = 40960 +} + +} diff --git a/test/Analysis/test-membar.mlir b/test/Analysis/test-membar.mlir new file mode 100644 index 000000000000..130445448b6b --- /dev/null +++ b/test/Analysis/test-membar.mlir @@ -0,0 +1,318 @@ +// RUN: triton-opt %s -split-input-file --mlir-disable-threading -test-print-membar 2>&1 | FileCheck %s + +#AL = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [4, 8], warpsPerCTA = [4, 1], order = [1, 0]}> +#sliceAd0 = #triton_gpu.slice<{dim = 0, parent = #AL}> +#BL = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [1, 32], warpsPerCTA = [4, 1], order = [1, 0]}> +#A_SHARED = #triton_gpu.shared<{vec = 2, perPhase = 2, maxPhase = 4, order = [1, 0]}> +#B_SHARED = #triton_gpu.shared<{vec = 2, perPhase = 2, maxPhase = 4, order = [1, 0]}> +#C = #triton_gpu.mma<{versionMajor = 2, warpsPerCTA = [4, 1]}> +#A_DOT = #triton_gpu.dot_op<{opIdx = 0, parent = #C}> +#B_DOT = #triton_gpu.dot_op<{opIdx = 1, parent = #C}> + +module attributes {"triton_gpu.num-warps" = 4 : i32} { + +// CHECK-LABEL: matmul_loop +// There shouldn't be any membar with the dot op encoding. 
+func @matmul_loop(%lb : index, %ub : index, %step : index, %A : !tt.ptr, %B : !tt.ptr) { + %a_ptr_init = tt.broadcast %A : (!tt.ptr) -> tensor<128x32x!tt.ptr, #AL> + %b_ptr_init = tt.broadcast %B : (!tt.ptr) -> tensor<32x128x!tt.ptr, #BL> + + %a_mask = arith.constant dense : tensor<128x32xi1, #AL> + %a_other = arith.constant dense<0.00e+00> : tensor<128x32xf16, #AL> + %b_mask = arith.constant dense : tensor<32x128xi1, #BL> + %b_other = arith.constant dense<0.00e+00> : tensor<32x128xf16, #BL> + %c_init = arith.constant dense<0.00e+00> : tensor<128x128xf32, #C> + + %a_off = arith.constant dense<4> : tensor<128x32xi32, #AL> + %b_off = arith.constant dense<4> : tensor<32x128xi32, #BL> + + scf.for %iv = %lb to %ub step %step iter_args(%a_ptr = %a_ptr_init, %b_ptr = %b_ptr_init, %prev_c = %c_init) -> (tensor<128x32x!tt.ptr, #AL>, tensor<32x128x!tt.ptr, #BL>, tensor<128x128xf32, #C>) { + %a_ = tt.load %a_ptr, %a_mask, %a_other {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x32xf16, #AL> + %a = triton_gpu.convert_layout %a_ : (tensor<128x32xf16, #AL>) -> tensor<128x32xf16, #A_DOT> + %b_ = tt.load %b_ptr, %b_mask, %b_other {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<32x128xf16, #BL> + %b = triton_gpu.convert_layout %b_ : (tensor<32x128xf16, #BL>) -> tensor<32x128xf16, #B_DOT> + %c = tt.dot %a, %b, %prev_c {allowTF32 = true, transA = false, transB = false} : tensor<128x32xf16, #A_DOT> * tensor<32x128xf16, #B_DOT> -> tensor<128x128xf32, #C> + + %next_a_ptr = tt.addptr %a_ptr, %a_off : tensor<128x32x!tt.ptr, #AL>, tensor<128x32xi32, #AL> + %next_b_ptr = tt.addptr %b_ptr, %b_off : tensor<32x128x!tt.ptr, #BL>, tensor<32x128xi32, #BL> + scf.yield %next_a_ptr, %next_b_ptr, %c : tensor<128x32x!tt.ptr, #AL>, tensor<32x128x!tt.ptr, #BL>, tensor<128x128xf32, #C> + } + return +} + +// CHECK-LABEL: raw_single_block +func @raw_single_block(%A : !tt.ptr) { + %cst1 = arith.constant dense : tensor<128x32xi1, #AL> + %cst2 = arith.constant dense<0.000000e+00> : tensor<128x32xf16, #AL> + %a_ptr = tt.broadcast %A : (!tt.ptr) -> tensor<128x32x!tt.ptr, #AL> + %a1_ = tt.load %a_ptr, %cst1, %cst2 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x32xf16, #AL> + %a1 = triton_gpu.convert_layout %a1_ : (tensor<128x32xf16, #AL>) -> tensor<128x32xf16, #A_SHARED> + // CHECK: Membar 5 + %a2 = triton_gpu.convert_layout %a1 : (tensor<128x32xf16, #A_SHARED>) -> tensor<128x32xf16, #A_SHARED> + return +} + +// CHECK-LABEL: war_single_block +func @war_single_block(%A : !tt.ptr) { + %cst1 = arith.constant dense : tensor<128x32xi1, #AL> + %cst2 = arith.constant dense<0.000000e+00> : tensor<128x32xf16, #AL> + %a_ptr = tt.broadcast %A : (!tt.ptr) -> tensor<128x32x!tt.ptr, #AL> + %a1_ = tt.load %a_ptr, %cst1, %cst2 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x32xf16, #AL> + %a1 = triton_gpu.convert_layout %a1_ : (tensor<128x32xf16, #AL>) -> tensor<128x32xf16, #A_SHARED> + // CHECK: Membar 5 + %a2 = triton_gpu.convert_layout %a1 : (tensor<128x32xf16, #A_SHARED>) -> tensor<128x32xf16, #AL> + // a2's liveness range ends here, and a3 and a2 have the same address range. + // So it makes sense to have a WAR dependency between a2 and a3. 
+ // CHECK-NEXT: Membar 7 + %a3 = triton_gpu.convert_layout %a1_ : (tensor<128x32xf16, #AL>) -> tensor<128x32xf16, #A_SHARED> + return +} + +// CHECK-LABEL: scratch +func @scratch() { + %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + // CHECK: Membar 1 + %a = tt.cat %cst0, %cst0 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + // CHECK-NEXT: Membar 3 + %aa = triton_gpu.convert_layout %a : (tensor<32x16xf16, #A_SHARED>) -> tensor<32x16xf16, #AL> + %b = tt.reduce %aa {redOp = 1 : i32, axis = 0 : i32} : tensor<32x16xf16, #AL> -> tensor<16xf16, #sliceAd0> + return +} + +// CHECK-LABEL: async_wait +func @async_wait() { + %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + // CHECK: Membar 1 + %a = tt.cat %cst0, %cst0 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + triton_gpu.async_wait {num = 4 : i32} + // CHECK-NEXT: Membar 4 + %a_ = triton_gpu.convert_layout %a : (tensor<32x16xf16, #A_SHARED>) -> tensor<32x16xf16, #AL> + return +} + +// CHECK-LABEL: alloc +func @alloc() { + %cst0 = triton_gpu.alloc_tensor : tensor<16x16xf16, #A_SHARED> + %a = tt.cat %cst0, %cst0 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + // CHECK: Membar 2 + %b = triton_gpu.convert_layout %a : (tensor<32x16xf16, #A_SHARED>) -> tensor<32x16xf16, #AL> + return +} + +// CHECK-LABEL: extract_slice +func @extract_slice() { + %cst0 = arith.constant dense<0.000000e+00> : tensor<1x16x16xf16, #A_SHARED> + %index = arith.constant 0 : index + %cst1 = tensor.extract_slice %cst0[%index, 0, 0][1, 16, 16][1, 1, 1] : tensor<1x16x16xf16, #A_SHARED> to tensor<16x16xf16, #A_SHARED> + // CHECK: Membar 3 + %cst2 = triton_gpu.convert_layout %cst1 : (tensor<16x16xf16, #A_SHARED>) -> tensor<16x16xf16, #AL> + // CHECK-NEXT: Membar 5 + %cst3 = triton_gpu.convert_layout %cst2 : (tensor<16x16xf16, #AL>) -> tensor<16x16xf16, #A_SHARED> + return +} + +// CHECK-LABEL: insert_slice_async +func @insert_slice_async(%A : !tt.ptr, %i1 : i1) { + %a_ptr = tt.broadcast %A : (!tt.ptr) -> tensor<16x16x!tt.ptr, #AL> + %mask = tt.splat %i1 : (i1) -> tensor<16x16xi1, #AL> + %other = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #AL> + %tensor = triton_gpu.alloc_tensor : tensor<1x16x16xf16, #A_SHARED> + %index = arith.constant 0 : i32 + %a = triton_gpu.insert_slice_async %a_ptr, %tensor, %index, %mask, %other {axis = 0 : i32, cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<16x16x!tt.ptr, #AL> -> tensor<1x16x16xf16, #A_SHARED> + // CHECK: Membar 6 + %b = tt.cat %a, %a {axis = 0} : (tensor<1x16x16xf16, #A_SHARED>, tensor<1x16x16xf16, #A_SHARED>) -> tensor<2x16x16xf16, #A_SHARED> + // CHECK: Membar 8 + %c = tt.cat %b, %b {axis = 0} : (tensor<2x16x16xf16, #A_SHARED>, tensor<2x16x16xf16, #A_SHARED>) -> tensor<4x16x16xf16, #A_SHARED> + return +} + +// CHECK-LABEL: insert_slice +func @insert_slice(%A : !tt.ptr, %i1 : i1) { + %a_ptr = tt.broadcast %A : (!tt.ptr) -> tensor<16x16x!tt.ptr, #AL> + %mask = tt.splat %i1 : (i1) -> tensor<16x16xi1, #AL> + %other = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #AL> + %tensor = arith.constant dense<0.000000e+00> : tensor<1x16x16xf16, #A_SHARED> + %index = arith.constant 0 : index + %al = tt.load %a_ptr, %mask, %other {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<16x16xf16, #AL> + // CHECK: Membar 6 + %a = tensor.insert_slice %al into %tensor[%index, 0, 
0][1, 16, 16][1, 1, 1]: tensor<16x16xf16, #AL> into tensor<1x16x16xf16, #A_SHARED> + // CHECK: Membar 8 + %b = tt.cat %a, %a {axis = 0} : (tensor<1x16x16xf16, #A_SHARED>, tensor<1x16x16xf16, #A_SHARED>) -> tensor<2x16x16xf16, #A_SHARED> + // CHECK: Membar 10 + %c = tt.cat %b, %b {axis = 0} : (tensor<2x16x16xf16, #A_SHARED>, tensor<2x16x16xf16, #A_SHARED>) -> tensor<4x16x16xf16, #A_SHARED> + return +} + +// If branch inserted a barrier for %cst0 and %cst1, but else didn't, then the barrier should be inserted in the parent region +// CHECK-LABEL: multi_blocks +func @multi_blocks(%i1 : i1) { + %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + %cst1 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + scf.if %i1 { + // CHECK: Membar 2 + %a = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + scf.yield + } else { + %cst2 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + %cst3 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + // CHECK-NEXT: Membar 7 + %b = tt.cat %cst2, %cst3 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + scf.yield + } + // CHECK-NEXT: Membar 10 + %c = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + return +} + +// Both branches inserted a barrier for %cst0 and %cst1, then the barrier doesn't need to be inserted in the parent region +// CHECK-LABEL: multi_blocks_join_barrier +func @multi_blocks_join_barrier(%i1 : i1) { + %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + %cst1 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + scf.if %i1 { + // CHECK: Membar 2 + %a = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + scf.yield + } else { + // CHECK-NEXT: Membar 5 + %a = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + scf.yield + } + %a_ = triton_gpu.convert_layout %cst0 : (tensor<16x16xf16, #A_SHARED>) -> tensor<16x16xf16, #AL> + return +} + +// Read yielded tensor requires a barrier +// CHECK-LABEL: multi_blocks_yield +func @multi_blocks_yield(%i1 : i1) { + %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + %cst1 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + %a = scf.if %i1 -> (tensor<32x16xf16, #A_SHARED>) { + // CHECK: Membar 2 + %a = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + scf.yield %a : tensor<32x16xf16, #A_SHARED> + } else { + // CHECK-NEXT: Membar 5 + %b = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + scf.yield %b : tensor<32x16xf16, #A_SHARED> + } + %a_ = triton_gpu.convert_layout %cst0 : (tensor<16x16xf16, #A_SHARED>) -> tensor<16x16xf16, #AL> + // CHECK-NEXT: Membar 9 + %b = tt.cat %a, %a {axis = 0} : (tensor<32x16xf16, #A_SHARED>, tensor<32x16xf16, #A_SHARED>) -> tensor<64x16xf16, #A_SHARED> + return +} + +// Conservatively add a barrier as if the branch (%i1) is never taken +// CHECK-LABEL: multi_blocks_noelse +func @multi_blocks_noelse(%i1 : i1) { + %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + %cst1 = arith.constant 
dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + scf.if %i1 { + // CHECK: Membar 2 + %a = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + scf.yield + } + %a_ = triton_gpu.convert_layout %cst0 : (tensor<16x16xf16, #A_SHARED>) -> tensor<16x16xf16, #AL> + return +} + +// Conservatively add a barrier as if the branch (%i2) is never taken +// CHECK-LABEL: multi_blocks_nested_scf +func @multi_blocks_nested_scf(%i1 : i1, %i2 : i1) { + %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + %cst1 = arith.constant dense<0.000000e+00> : tensor<16x16xf16, #A_SHARED> + scf.if %i1 { + scf.if %i2 { + // CHECK: Membar 2 + %b = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + scf.yield + } + scf.yield + } else { + // CHECK-NEXT: Membar 6 + %b = tt.cat %cst0, %cst1 {axis = 0} : (tensor<16x16xf16, #A_SHARED>, tensor<16x16xf16, #A_SHARED>) -> tensor<32x16xf16, #A_SHARED> + scf.yield + } + // CHECK-NEXT: Membar 9 + %a_ = triton_gpu.convert_layout %cst0 : (tensor<16x16xf16, #A_SHARED>) -> tensor<16x16xf16, #AL> + return +} + +// CHECK-LABEL: for +func @for(%lb : index, %ub : index, %step : index, %A : !tt.ptr, %B : !tt.ptr) { + %a_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + %b_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + %c_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + %a_shared, %b_shared, %c_shared = scf.for %iv = %lb to %ub step %step iter_args(%a_shared = %a_shared_init, %b_shared = %b_shared_init, %c_shared = %c_shared_init) -> (tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>) { + // CHECK-NEXT: Membar 3 + %cst0 = tt.cat %a_shared, %b_shared {axis = 0} : (tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>) -> tensor<256x32xf16, #A_SHARED> + scf.yield %b_shared, %a_shared, %a_shared : tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED> + } + return +} + +// Although a_shared and b_shared are synced before entering the loop, +// they are reassociated with aliases (c_shared) and thus require a barrier. 
+// CHECK-LABEL: for_alias +func @for_alias(%lb : index, %ub : index, %step : index, %A : !tt.ptr, %B : !tt.ptr) { + %a_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + %b_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + // CHECK-NEXT: Membar 2 + %cst0 = tt.cat %a_shared_init, %b_shared_init {axis = 0} : (tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>) -> tensor<256x32xf16, #A_SHARED> + %c_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + %a_shared, %b_shared, %c_shared = scf.for %iv = %lb to %ub step %step iter_args(%a_shared = %a_shared_init, %b_shared = %b_shared_init, %c_shared = %c_shared_init) -> (tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>) { + %cst1 = tt.cat %a_shared_init, %b_shared_init {axis = 0} : (tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>) -> tensor<256x32xf16, #AL> + // CHECK-NEXT: Membar 6 + %cst2 = tt.cat %a_shared, %b_shared {axis = 0} : (tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>) -> tensor<256x32xf16, #AL> + scf.yield %c_shared, %a_shared, %b_shared : tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED> + } + // CHECK-NEXT: Membar 9 + %cst3 = tt.cat %cst0, %cst0 {axis = 0} : (tensor<256x32xf16, #A_SHARED>, tensor<256x32xf16, #A_SHARED>) -> tensor<512x32xf16, #A_SHARED> + return +} + +// Although cst2 is not an argument of scf.yield, its memory is reused by cst1. +// So we need a barrier both before and after cst1 +// CHECK-LABEL: for_reuse +func @for_reuse(%lb : index, %ub : index, %step : index, %A : !tt.ptr, %B : !tt.ptr) { + %a_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + %b_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + // CHECK-NEXT: Membar 2 + %cst0 = tt.cat %a_shared_init, %b_shared_init {axis = 0} : (tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>) -> tensor<256x32xf16, #A_SHARED> + %c_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + %a_shared, %b_shared, %c_shared = scf.for %iv = %lb to %ub step %step iter_args(%a_shared = %a_shared_init, %b_shared = %b_shared_init, %c_shared = %c_shared_init) -> (tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>) { + // CHECK-NEXT: Membar 5 + %cst1 = tt.cat %a_shared_init, %b_shared_init {axis = 0} : (tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>) -> tensor<256x32xf16, #A_SHARED> + // CHECK-NEXT: Membar 7 + %cst2 = tt.cat %a_shared, %b_shared {axis = 0} : (tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>) -> tensor<256x32xf16, #A_SHARED> + scf.yield %c_shared, %a_shared, %b_shared : tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED> + } + // CHECK-NEXT: Membar 10 + %cst3 = tt.cat %cst0, %cst0 {axis = 0} : (tensor<256x32xf16, #A_SHARED>, tensor<256x32xf16, #A_SHARED>) -> tensor<512x32xf16, #A_SHARED> + return +} + + +// CHECK-LABEL: for_reuse_nested +func @for_reuse_nested(%lb : index, %ub : index, %step : index, %A : !tt.ptr, %B : !tt.ptr) { + %a_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + %b_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + // CHECK-NEXT: Membar 2 + %cst0 = tt.cat %a_shared_init, %b_shared_init {axis = 0} : (tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>) -> tensor<256x32xf16, #A_SHARED> + 
%c_shared_init = arith.constant dense<0.00e+00> : tensor<128x32xf16, #A_SHARED> + %a_shared, %b_shared, %c_shared = scf.for %iv = %lb to %ub step %step iter_args(%a_shared = %a_shared_init, %b_shared = %b_shared_init, %c_shared = %c_shared_init) -> (tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>) { + // CHECK-NEXT: Membar 5 + %cst1 = tt.cat %a_shared_init, %b_shared_init {axis = 0} : (tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>) -> tensor<256x32xf16, #A_SHARED> + %a_shared_next, %b_shared_next, %c_shared_next = scf.for %ivv = %lb to %ub step %step iter_args(%a_shared_nested = %a_shared_init, %b_shared_nested = %b_shared_init, %c_shared_nested = %c_shared_init) -> (tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>) { + // CHECK-NEXT: Membar 7 + %cst2 = tt.cat %a_shared_nested, %b_shared_nested {axis = 0} : (tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>) -> tensor<256x32xf16, #A_SHARED> + scf.yield %c_shared_nested, %a_shared_nested, %b_shared_nested : tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED> + } + scf.yield %c_shared, %a_shared, %b_shared : tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED>, tensor<128x32xf16, #A_SHARED> + } + // CHECK-NEXT: Membar 11 + %cst3 = tt.cat %cst0, %cst0 {axis = 0} : (tensor<256x32xf16, #A_SHARED>, tensor<256x32xf16, #A_SHARED>) -> tensor<512x32xf16, #A_SHARED> + return +} + +} diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt new file mode 100644 index 000000000000..04838a67355d --- /dev/null +++ b/test/CMakeLists.txt @@ -0,0 +1,26 @@ +add_subdirectory(lib) + +llvm_canonicalize_cmake_booleans( + MLIR_ENABLE_BINDINGS_PYTHON +) + +configure_lit_site_cfg( + ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.py.in + ${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg.py + MAIN_CONFIG + ${CMAKE_CURRENT_SOURCE_DIR}/lit.cfg.py +) + +set(TRITON_TEST_DEPENDS + triton-opt + FileCheck +) + +add_lit_testsuite(check-triton-lit-tests "Running the triton regression tests" + ${CMAKE_CURRENT_BINARY_DIR} + DEPENDS ${TRITON_TEST_DEPENDS} + ) + +set_target_properties(check-triton-lit-tests PROPERTIES FOLDER "Tests") + +add_lit_testsuites(TRITON-LIT-TESTS ${CMAKE_CURRENT_SOURCE_DIR} DEPENDS ${TRITON_TEST_DEPENDS}) diff --git a/test/Conversion/triton_ops.mlir b/test/Conversion/triton_ops.mlir new file mode 100644 index 000000000000..e9ee50243559 --- /dev/null +++ b/test/Conversion/triton_ops.mlir @@ -0,0 +1,132 @@ +// RUN: triton-opt %s | FileCheck %s + +func @cast_ops(%scalar_ptr: !tt.ptr, %scalar_f32: f32, %scalar_i64: i64) { + // scalar -> scalar + // CHECK: i64 -> !tt.ptr + %0 = tt.int_to_ptr %scalar_i64 : i64 -> !tt.ptr + // CHECK: !tt.ptr -> i64 + %1 = tt.ptr_to_int %scalar_ptr : !tt.ptr -> i64 + // CHECK: f32 to f16 + %2 = arith.truncf %scalar_f32 : f32 to f16 + + // 0D tensor -> 0D tensor + %tensor_ptr_0d = tt.splat %scalar_ptr : (!tt.ptr) -> tensor> + %tensor_f32_0d = tt.splat %scalar_f32 : (f32) -> tensor + %tensor_i64_0d = tt.splat %scalar_i64 : (i64) -> tensor + + // CHECK: tensor -> tensor> + %3 = tt.int_to_ptr %tensor_i64_0d : tensor -> tensor> + // CHECK: tensor> -> tensor + %4 = tt.ptr_to_int %tensor_ptr_0d : tensor> -> tensor + // CHECK: tensor to tensor + %5 = arith.truncf %tensor_f32_0d : tensor to tensor + + // 1D tensor -> 1D tensor + %tensor_ptr_1d = tt.splat %scalar_ptr : (!tt.ptr) -> tensor<16x!tt.ptr> + %tensor_f32_1d = tt.splat %scalar_f32 : (f32) -> tensor<16xf32> + %tensor_i64_1d = tt.splat
%scalar_i64 : (i64) -> tensor<16xi64> + + // CHECK: tensor<16xi64> -> tensor<16x!tt.ptr> + %6 = tt.int_to_ptr %tensor_i64_1d : tensor<16xi64> -> tensor<16x!tt.ptr> + // CHECK: tensor<16x!tt.ptr> -> tensor<16xi64> + %7 = tt.ptr_to_int %tensor_ptr_1d : tensor<16x!tt.ptr> -> tensor<16xi64> + // CHECK: tensor<16xf32> to tensor<16xf16> + %8 = arith.truncf %tensor_f32_1d : tensor<16xf32> to tensor<16xf16> + return +} + +func @addptr_ops(%scalar_ptr: !tt.ptr, %scalar_i32: i32) { + // scalar -> scalar + // CHECK: !tt.ptr + %0 = tt.addptr %scalar_ptr, %scalar_i32 : !tt.ptr, i32 + + // 0D tensor -> 0D tensor + %tensor_ptr_0d = tt.splat %scalar_ptr : (!tt.ptr) -> tensor> + %tensor_i32_0d = tt.splat %scalar_i32 : (i32) -> tensor + // CHECK: tensor> + %1 = tt.addptr %tensor_ptr_0d, %tensor_i32_0d : tensor>, tensor + + // 1D tensor -> 1D tensor + %tensor_ptr_1d = tt.splat %scalar_ptr : (!tt.ptr) -> tensor<16x!tt.ptr> + %tensor_i32_1d = tt.splat %scalar_i32 : (i32) -> tensor<16xi32> + // CHECK: tensor<16x!tt.ptr> + %2 = tt.addptr %tensor_ptr_1d, %tensor_i32_1d : tensor<16x!tt.ptr>, tensor<16xi32> + return +} + +func @load_store_ops_scalar(%ptr: !tt.ptr {tt.divisibility = 16 : i32}, %mask : i1) { + // Test if Load/Store ops can handle scalar values + %other = arith.constant 0.0e+0 : f32 + + // load scalar + // CHECK: %[[L0:.*]] = tt.load %{{.*}} {cache = 1 : i32, evict = 1 : i32, isVolatile = true} : f32 + %a = tt.load %ptr {cache = 1 : i32, evict = 1 : i32, isVolatile = true} : f32 + // CHECK: %[[L1:.*]] = tt.load %{{.*}}, %{{.*}} {cache = 1 : i32, evict = 1 : i32, isVolatile = true} : f32 + %b = tt.load %ptr, %mask {cache = 1 : i32, evict = 1 : i32, isVolatile = true} : f32 + // CHECK: %[[L2:.*]] = tt.load %{{.*}}, %{{.*}}, %{{.*}} {cache = 1 : i32, evict = 1 : i32, isVolatile = true} : f32 + %c = tt.load %ptr, %mask, %other {cache = 1 : i32, evict = 1 : i32, isVolatile = true} : f32 + + // store scalar + // CHECK: tt.store %{{.*}}, %[[L0]] : f32 + tt.store %ptr, %a : f32 + // CHECK: tt.store %{{.*}}, %[[L1]], %{{.*}} : f32 + tt.store %ptr, %b, %mask : f32 + // CHECK: tt.store %{{.*}}, %[[L2]], %{{.*}} : f32 + tt.store %ptr, %c, %mask : f32 + return +} + +func @reduce_ops_infer(%ptr: !tt.ptr, %v : tensor<1x2x4xf32>) { + // Test if reduce ops infer types correctly + + // CHECK: %{{.*}} = tt.reduce %{{.*}} -> tensor<2x4xf32> + %a = tt.reduce %v {redOp = 1 : i32, axis = 0 : i32} : tensor<1x2x4xf32> -> tensor<2x4xf32> + // CHECK: %{{.*}} = tt.reduce %{{.*}} -> tensor<1x4xf32> + %b = tt.reduce %v {redOp = 1 : i32, axis = 1 : i32} : tensor<1x2x4xf32> -> tensor<1x4xf32> + // CHECK: %{{.*}} = tt.reduce %{{.*}} -> tensor<1x2xf32> + %c = tt.reduce %v {redOp = 1 : i32, axis = 2 : i32} : tensor<1x2x4xf32> -> tensor<1x2xf32> + // CHECK: %{{.*}} = tt.reduce %{{.*}} -> tensor<1xf32> + %e = tt.reduce %b {redOp = 1 : i32, axis = 1 : i32} : tensor<1x4xf32> -> tensor<1xf32> + // CHECK: %{{.*}} = tt.reduce %{{.*}} -> tensor<4xf32> + %f = tt.reduce %a {redOp = 1 : i32, axis = 0 : i32} : tensor<2x4xf32> -> tensor<4xf32> + // CHECK: %{{.*}} = tt.reduce %{{.*}} -> f32 + %g = tt.reduce %f {redOp = 1 : i32, axis = 0 : i32} : tensor<4xf32> -> f32 + + // Avoid optimizations for c, e, and g + %ptr1x2 = tt.splat %ptr : (!tt.ptr) -> tensor<1x2x!tt.ptr> + %ptr1 = tt.splat %ptr : (!tt.ptr) -> tensor<1x!tt.ptr> + tt.store %ptr1x2, %c : tensor<1x2xf32> + tt.store %ptr1, %e : tensor<1xf32> + tt.store %ptr, %g : f32 + return +} + +func @dot_ops_infer(%ptr: !tt.ptr, %v : f32) { + // Test if dot ops infer types correctly + %v128x32 =
tt.splat %v : (f32) -> tensor<128x32xf32> + %v32x128 = tt.splat %v : (f32) -> tensor<32x128xf32> + %v128x1 = tt.splat %v : (f32) -> tensor<128x1xf32> + %v1x128 = tt.splat %v : (f32) -> tensor<1x128xf32> + + %zero128x128 = arith.constant dense<0.00e+00> : tensor<128x128xf32> + %zero32x32 = arith.constant dense<0.00e+00> : tensor<32x32xf32> + %zero1x1 = arith.constant dense<0.00e+00> : tensor<1x1xf32> + + // CHECK: %{{.*}} = tt.dot %{{.*}} -> tensor<128x128xf32> + %r1 = tt.dot %v128x32, %v32x128, %zero128x128 {allowTF32 = true, transA = false, transB = false} : tensor<128x32xf32> * tensor<32x128xf32> -> tensor<128x128xf32> + // CHECK: %{{.*}} = tt.dot %{{.*}} -> tensor<32x32xf32> + %r2 = tt.dot %v32x128, %v128x32, %zero32x32 {allowTF32 = true, transA = false, transB = false} : tensor<32x128xf32> * tensor<128x32xf32> -> tensor<32x32xf32> + // CHECK: %{{.*}} = tt.dot %{{.*}} -> tensor<128x128xf32> + %r3 = tt.dot %v128x1, %v1x128, %zero128x128 {allowTF32 = true, transA = false, transB = false} : tensor<128x1xf32> * tensor<1x128xf32> -> tensor<128x128xf32> + // CHECK: %{{.*}} = tt.dot %{{.*}} -> tensor<1x1xf32> + %r4 = tt.dot %v1x128, %v128x1, %zero1x1 {allowTF32 = true, transA = false, transB = false} : tensor<1x128xf32> * tensor<128x1xf32> -> tensor<1x1xf32> + + %ptr128x128 = tt.splat %ptr : (!tt.ptr) -> tensor<128x128x!tt.ptr> + %ptr32x32 = tt.splat %ptr : (!tt.ptr) -> tensor<32x32x!tt.ptr> + %ptr1x1 = tt.splat %ptr : (!tt.ptr) -> tensor<1x1x!tt.ptr> + tt.store %ptr128x128, %r1 : tensor<128x128xf32> + tt.store %ptr32x32, %r2 : tensor<32x32xf32> + tt.store %ptr128x128, %r3 : tensor<128x128xf32> + tt.store %ptr1x1, %r4 : tensor<1x1xf32> + return +} diff --git a/test/Conversion/triton_to_tritongpu.mlir b/test/Conversion/triton_to_tritongpu.mlir new file mode 100644 index 000000000000..a160bc881595 --- /dev/null +++ b/test/Conversion/triton_to_tritongpu.mlir @@ -0,0 +1,53 @@ +// RUN: triton-opt %s -split-input-file -convert-triton-to-tritongpu=num-warps=2 | FileCheck %s + +func @ops() { + // CHECK: module attributes {"triton_gpu.num-warps" = 2 : i32} {{.*}} + %a = arith.constant dense<1.00e+00> : tensor<128x32xf16> + %b = arith.constant dense<2.00e+00> : tensor<32x128xf16> + %c = arith.constant dense<3.00e+00> : tensor<128x128xf32> + %0 = tt.dot %a, %b, %c {allowTF32 = true, transA = false, transB = false} : tensor<128x32xf16> * tensor<32x128xf16> -> tensor<128x128xf32> + return +} + +// ----- + +func @load_ops(%ptr: !tt.ptr {tt.divisibility = 16 : i32}) { + // Test if LoadOp is lowered properly (see #771) + %ptrs = tt.splat %ptr : (!tt.ptr) -> tensor<128x!tt.ptr> + %mask = arith.constant dense : tensor<128xi1> + %other = arith.constant dense<0.0e+0> : tensor<128xf32> + // CHECK: %{{.*}} = tt.load %{{.*}} {cache = 1 : i32, evict = 1 : i32, isVolatile = true} : {{.*}} + %a = tt.load %ptrs {cache = 1 : i32, evict = 1 : i32, isVolatile = true} : tensor<128xf32> + // CHECK: %{{.*}} = tt.load %{{.*}}, %{{.*}} {cache = 1 : i32, evict = 1 : i32, isVolatile = true} : {{.*}} + %b = tt.load %ptrs, %mask {cache = 1 : i32, evict = 1 : i32, isVolatile = true} : tensor<128xf32> + // CHECK: %{{.*}} = tt.load %{{.*}}, %{{.*}}, %{{.*}} {cache = 1 : i32, evict = 1 : i32, isVolatile = true} : {{.*}} + %c = tt.load %ptrs, %mask, %other {cache = 1 : i32, evict = 1 : i32, isVolatile = true} : tensor<128xf32> + tt.store %ptrs, %a : tensor<128xf32> + tt.store %ptrs, %b : tensor<128xf32> + tt.store %ptrs, %c : tensor<128xf32> + return +} + +// ----- + +func @reduce_ops(%ptr: !tt.ptr {tt.divisibility = 16 : i32}) { + // 
Test if the total number of threadsPerWarp is 32 + // Test if the total number of warps is 2 + // CHECK: #blocked0 = #triton_gpu.blocked<{sizePerThread = [1, 1], threadsPerWarp = [4, 8], warpsPerCTA = [1, 2], order = [0, 1]}> + // CHECK: #blocked1 = #triton_gpu.blocked<{sizePerThread = [1, 1], threadsPerWarp = [8, 4], warpsPerCTA = [1, 2], order = [0, 1]}> + // CHECK: #blocked2 = #triton_gpu.blocked<{sizePerThread = [1, 1], threadsPerWarp = [16, 2], warpsPerCTA = [1, 2], order = [0, 1]}> + // CHECK: module attributes {"triton_gpu.num-warps" = 2 : i32} {{.*}} + %c0 = arith.constant dense<1.00e+00> : tensor<4x4xf32> + %c1 = arith.constant dense<2.00e+00> : tensor<8x2xf32> + %c2 = arith.constant dense<3.00e+00> : tensor<16x16xf32> + // CHECK: tensor<4x4xf32, #blocked0> -> tensor<4xf32, #triton_gpu.slice<{dim = 0, parent = #blocked0}>> + %c0_ = tt.reduce %c0 {redOp = 1 : i32, axis = 0 : i32} : tensor<4x4xf32> -> tensor<4xf32> + // CHECK: tensor<8x2xf32, #blocked1> -> tensor<2xf32, #triton_gpu.slice<{dim = 0, parent = #blocked1}> + %c1_ = tt.reduce %c1 {redOp = 1 : i32, axis = 0 : i32} : tensor<8x2xf32> -> tensor<2xf32> + // CHECK: tensor<8x2xf32, #blocked1> -> tensor<8xf32, #triton_gpu.slice<{dim = 1, parent = #blocked1}>> + %c2_ = tt.reduce %c1 {redOp = 1 : i32, axis = 1 : i32} : tensor<8x2xf32> -> tensor<8xf32> + // CHECK: tensor<16x16xf32, #blocked2> -> tensor<16xf32, #triton_gpu.slice<{dim = 0, parent = #blocked2}>> + %c3_ = tt.reduce %c2 {redOp = 1 : i32, axis = 0 : i32} : tensor<16x16xf32> -> tensor<16xf32> + + return +} diff --git a/test/Conversion/tritongpu_to_llvm.mlir b/test/Conversion/tritongpu_to_llvm.mlir new file mode 100644 index 000000000000..abc5e9a316f2 --- /dev/null +++ b/test/Conversion/tritongpu_to_llvm.mlir @@ -0,0 +1,1016 @@ +// RUN: triton-opt %s -split-input-file --convert-triton-gpu-to-llvm | FileCheck %s + +module attributes {"triton_gpu.num-warps" = 4 : i32} { + // CHECK: llvm.func @test_empty_kernel(%arg0: i32, %arg1: !llvm.ptr) + // Here the 128 comes from the 4 in module attribute multiples 32 + // CHECK: attributes {nvvm.kernel = 1 : ui1, nvvm.maxntid = 128 : i32} {{.*}} + func @test_empty_kernel(%lb : index, %A : !tt.ptr) { + // CHECK: llvm.return + return + } +} // end module + +// ----- + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [4], order = [0]}> +module attributes {"triton_gpu.num-warps" = 4 : i32} { + // CHECK-LABEL: basic_load + func @basic_load(%a_ptr_init : tensor<256x!tt.ptr, #blocked0>, %cst : tensor<256xi1, #blocked0>, %cst_0 : tensor<256xf32, #blocked0>) { + // CHECK: llvm.inline_asm + // CHECK: llvm.inline_asm + %1 = tt.load %a_ptr_init, %cst, %cst_0 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256xf32, #blocked0> + return + } +} + +// ----- + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [2], threadsPerWarp = [32], warpsPerCTA = [4], order = [0]}> +module attributes {"triton_gpu.num-warps" = 4 : i32} { + // CHECK-LABEL: vectorized_load + func @vectorized_load(%a_ptr_init : tensor<256x!tt.ptr, #blocked0>, %cst : tensor<256xi1, #blocked0>, %cst_0 : tensor<256xf32, #blocked0>) { + // CHECK: llvm.inline_asm + // CHECK-SAME: ld.global.b32 + // CHECK: llvm.inline_asm + // CHECK-SAME: ld.global.b32 + %1 = tt.load %a_ptr_init, %cst, %cst_0 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256xf32, #blocked0> + return + } +} + +// ----- + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [8], threadsPerWarp = [32], warpsPerCTA = [1], order = [0]}> +module attributes 
{"triton_gpu.num-warps" = 1 : i32} { + // CHECK-LABEL: vectorized_load_f16 + func @vectorized_load_f16(%a_ptr_init: tensor<256x!tt.ptr, #blocked0>, %cst : tensor<256xi1, #blocked0>, %cst_0 : tensor<256xf16, #blocked0>) { + // CHECK: llvm.inline_asm + // CHECK-SAME: ld.global.b16 + // CHECK: llvm.inline_asm + // CHECK-SAME: ld.global.b16 + %1 = tt.load %a_ptr_init, %cst, %cst_0 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256xf16, #blocked0> + return + } +} + +// ----- + +// TODO: masked load with vectorization is pending on TODO +#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [8], order = [0]}> +module attributes {"triton_gpu.num-warps" = 4 : i32} { + // CHECK-LABEL: masked_load_const_other + func @masked_load_const_other(%a_ptr_init : tensor<256x!tt.ptr, #blocked0>, %cst : tensor<256xi1, #blocked0>) { + %cst_0 = arith.constant dense<0.000000e+00> : tensor<256xf32, #blocked0> + %1 = tt.load %a_ptr_init, %cst, %cst_0 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256xf32, #blocked0> + return + } +} + +// ----- + +// TODO: masked load with vectorization is pending on TODO +#blocked0 = #triton_gpu.blocked<{sizePerThread = [4], threadsPerWarp = [32], warpsPerCTA = [8], order = [0]}> +module attributes {"triton_gpu.num-warps" = 4 : i32} { + // CHECK-LABEL: masked_load_const_other_vec + func @masked_load_const_other_vec(%a_ptr_init : tensor<256x!tt.ptr, #blocked0>, %cst : tensor<256xi1, #blocked0>) { + %cst_0 = arith.constant dense<0.000000e+00> : tensor<256xf32, #blocked0> + %1 = tt.load %a_ptr_init, %cst, %cst_0 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256xf32, #blocked0> + return + } +} + +// ----- + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [2], order = [0]}> +module attributes {"triton_gpu.num-warps" = 2 : i32} { + // CHECK-LABEL: global_load_store_no_vec + func @global_load_store_no_vec(%arg0: !tt.ptr {tt.divisibility = 4 : i32}, %arg1: !tt.ptr {tt.divisibility = 4 : i32}, %arg2: !tt.ptr {tt.divisibility = 4 : i32}, %arg3: i32) { + %c256_i32 = arith.constant 256 : i32 + %0 = tt.get_program_id {axis = 0 : i32} : i32 + %1 = arith.muli %0, %c256_i32 : i32 + %2 = tt.make_range {end = 256 : i32, start = 0 : i32} : tensor<256xi32, #blocked0> + %3 = tt.splat %1 : (i32) -> tensor<256xi32, #blocked0> + %4 = arith.addi %3, %2 : tensor<256xi32, #blocked0> + %5 = tt.splat %arg0 : (!tt.ptr) -> tensor<256x!tt.ptr, #blocked0> + %6 = tt.addptr %5, %4 : tensor<256x!tt.ptr, #blocked0>, tensor<256xi32, #blocked0> + %7 = tt.splat %arg1 : (!tt.ptr) -> tensor<256x!tt.ptr, #blocked0> + %8 = tt.addptr %7, %4 : tensor<256x!tt.ptr, #blocked0>, tensor<256xi32, #blocked0> + + // Load 4 elements from vector0 + // CHECK: "@${{.*}} ld.global.b32 { ${{.*}} }, [ ${{.*}} + 0 ]; + // CHECK: "@${{.*}} ld.global.b32 { ${{.*}} }, [ ${{.*}} + 0 ]; + // CHECK: "@${{.*}} ld.global.b32 { ${{.*}} }, [ ${{.*}} + 0 ]; + // CHECK: "@${{.*}} ld.global.b32 { ${{.*}} }, [ ${{.*}} + 0 ]; + + // Load 4 elements from vector1 + // CHECK: "@${{.*}} ld.global.b32 { ${{.*}} }, [ ${{.*}} + 0 ]; + // CHECK: "@${{.*}} ld.global.b32 { ${{.*}} }, [ ${{.*}} + 0 ]; + // CHECK: "@${{.*}} ld.global.b32 { ${{.*}} }, [ ${{.*}} + 0 ]; + // CHECK: "@${{.*}} ld.global.b32 { ${{.*}} }, [ ${{.*}} + 0 ]; + %9 = tt.load %6 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256xf32, #blocked0> + %10 = tt.load %8 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256xf32, #blocked0> + %11 
= arith.addf %9, %10 : tensor<256xf32, #blocked0> + %12 = tt.splat %arg2 : (!tt.ptr) -> tensor<256x!tt.ptr, #blocked0> + %13 = tt.addptr %12, %4 : tensor<256x!tt.ptr, #blocked0>, tensor<256xi32, #blocked0> + + // Store 4 elements to global + // CHECK: @${{.*}} st.global.b32 [ ${{.*}} + 0 ], { ${{.*}} }; + // CHECK: @${{.*}} st.global.b32 [ ${{.*}} + 0 ], { ${{.*}} }; + // CHECK: @${{.*}} st.global.b32 [ ${{.*}} + 0 ], { ${{.*}} }; + // CHECK: @${{.*}} st.global.b32 [ ${{.*}} + 0 ], { ${{.*}} }; + tt.store %13, %11 : tensor<256xf32, #blocked0> + return + } +} + +// ----- + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [4], threadsPerWarp = [32], warpsPerCTA = [2], order = [0]}> +module attributes {"triton_gpu.num-warps" = 2 : i32} { + // CHECK-LABEL: global_load_store_vec4 + func @global_load_store_vec4(%arg0: !tt.ptr {tt.divisibility = 4 : i32}, %arg1: !tt.ptr {tt.divisibility = 4 : i32}, %arg2: !tt.ptr {tt.divisibility = 4 : i32}, %arg3: i32) { + %c256_i32 = arith.constant 256 : i32 + %0 = tt.get_program_id {axis = 0 : i32} : i32 + %1 = arith.muli %0, %c256_i32 : i32 + %2 = tt.make_range {end = 256 : i32, start = 0 : i32} : tensor<256xi32, #blocked0> + %3 = tt.splat %1 : (i32) -> tensor<256xi32, #blocked0> + %4 = arith.addi %3, %2 : tensor<256xi32, #blocked0> + %5 = tt.splat %arg0 : (!tt.ptr) -> tensor<256x!tt.ptr, #blocked0> + %6 = tt.addptr %5, %4 : tensor<256x!tt.ptr, #blocked0>, tensor<256xi32, #blocked0> + %7 = tt.splat %arg1 : (!tt.ptr) -> tensor<256x!tt.ptr, #blocked0> + %8 = tt.addptr %7, %4 : tensor<256x!tt.ptr, #blocked0>, tensor<256xi32, #blocked0> + + // Load 4 elements from A with a single vectorized load instruction + // CHECK: @${{.*}} ld.global.v4.b32 { ${{.*}}, ${{.*}}, ${{.*}}, ${{.*}} }, [ ${{.*}} + 0 ]; + + // Load 4 elements from B with a single vectorized load instruction + // CHECK: @${{.*}} ld.global.v4.b32 { ${{.*}}, ${{.*}}, ${{.*}}, ${{.*}} }, [ ${{.*}} + 0 ]; + + %9 = tt.load %6 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256xf32, #blocked0> + %10 = tt.load %8 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256xf32, #blocked0> + %11 = arith.addf %9, %10 : tensor<256xf32, #blocked0> + %12 = tt.splat %arg2 : (!tt.ptr) -> tensor<256x!tt.ptr, #blocked0> + %13 = tt.addptr %12, %4 : tensor<256x!tt.ptr, #blocked0>, tensor<256xi32, #blocked0> + + // Store 4 elements to global with a single vectorized store instruction + // CHECK: @$5 st.global.v4.b32 [ ${{.*}} + 0 ], { ${{.*}}, ${{.*}}, ${{.*}}, ${{.*}} }; + tt.store %13, %11 : tensor<256xf32, #blocked0> + return + } +} + +// ----- + +// This test verifies the vectorization of Load and Store Ops. +#blocked = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [2], order = [0]}> +// Note: the %n_elements doesn't have a "tt.divisibility" hint, so Triton assumes its divisibility is 1; this should affect the mask's alignment and further restrict the load/store ops' vector width to be 1.
+module attributes {"triton_gpu.num-warps" = 2 : i32} { + func @vecadd_masked_vec1(%arg0: !tt.ptr {tt.divisibility = 16 : i32}, %arg1: !tt.ptr {tt.divisibility = 16 : i32}, %arg2: !tt.ptr {tt.divisibility = 16 : i32}, %n_elements: i32) { + %c64_i32 = arith.constant 64 : i32 + %0 = tt.get_program_id {axis = 0 : i32} : i32 + %1 = arith.muli %0, %c64_i32 : i32 + %2 = tt.make_range {end = 64 : i32, start = 0 : i32} : tensor<64xi32, #blocked> + %3 = tt.splat %1 : (i32) -> tensor<64xi32, #blocked> + %4 = arith.addi %3, %2 : tensor<64xi32, #blocked> + %5 = tt.splat %arg0 : (!tt.ptr) -> tensor<64x!tt.ptr, #blocked> + %6 = tt.addptr %5, %4 : tensor<64x!tt.ptr, #blocked>, tensor<64xi32, #blocked> + %7 = tt.splat %arg1 : (!tt.ptr) -> tensor<64x!tt.ptr, #blocked> + %8 = tt.addptr %7, %4 : tensor<64x!tt.ptr, #blocked>, tensor<64xi32, #blocked> + %9 = tt.splat %n_elements : (i32) -> tensor<64xi32, #blocked> + %10 = "triton_gpu.cmpi"(%4, %9) {predicate = 2 : i64} : (tensor<64xi32, #blocked>, tensor<64xi32, #blocked>) -> tensor<64xi1, #blocked> + // load op has a vector width = 1 due to the %mask's alignment + // CHECK: ld.global.b32 + %11 = tt.load %6, %10 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<64xf32, #blocked> + %12 = tt.load %8, %10 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<64xf32, #blocked> + %13 = arith.addf %11, %12 : tensor<64xf32, #blocked> + %14 = tt.splat %arg2 : (!tt.ptr) -> tensor<64x!tt.ptr, #blocked> + %15 = tt.addptr %14, %4 : tensor<64x!tt.ptr, #blocked>, tensor<64xi32, #blocked> + tt.store %15, %13, %10 : tensor<64xf32, #blocked> + return + } +} + +// ----- + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [8], threadsPerWarp = [32], warpsPerCTA = [1], order = [0]}> +module attributes {"triton_gpu.num-warps" = 1 : i32} { + // CHECK-LABEL: global_load_store_vec8 + func @global_load_store_vec8(%arg0: !tt.ptr {tt.divisibility = 4 : i32}, %arg1: !tt.ptr {tt.divisibility = 4 : i32}, %arg2: !tt.ptr {tt.divisibility = 4 : i32}, %arg3: i32) { + %c256_i32 = arith.constant 256 : i32 + %0 = tt.get_program_id {axis = 0 : i32} : i32 + %1 = arith.muli %0, %c256_i32 : i32 + %2 = tt.make_range {end = 256 : i32, start = 0 : i32} : tensor<256xi32, #blocked0> + %3 = tt.splat %1 : (i32) -> tensor<256xi32, #blocked0> + %4 = arith.addi %3, %2 : tensor<256xi32, #blocked0> + %5 = tt.splat %arg0 : (!tt.ptr) -> tensor<256x!tt.ptr, #blocked0> + %6 = tt.addptr %5, %4 : tensor<256x!tt.ptr, #blocked0>, tensor<256xi32, #blocked0> + %7 = tt.splat %arg1 : (!tt.ptr) -> tensor<256x!tt.ptr, #blocked0> + %8 = tt.addptr %7, %4 : tensor<256x!tt.ptr, #blocked0>, tensor<256xi32, #blocked0> + + // Load 8 elements from A with two vectorized load instruction + // CHECK: @${{.*}} ld.global.v4.b32 { ${{.*}}, ${{.*}}, ${{.*}}, ${{.*}} }, [ ${{.*}} + 0 ]; + // CHECK: @${{.*}} ld.global.v4.b32 { ${{.*}}, ${{.*}}, ${{.*}}, ${{.*}} }, [ ${{.*}} + 0 ]; + + // Load 8 elements from B with two vectorized load instruction + // CHECK: @${{.*}} ld.global.v4.b32 { ${{.*}}, ${{.*}}, ${{.*}}, ${{.*}} }, [ ${{.*}} + 0 ]; + // CHECK: @${{.*}} ld.global.v4.b32 { ${{.*}}, ${{.*}}, ${{.*}}, ${{.*}} }, [ ${{.*}} + 0 ]; + + %9 = tt.load %6 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256xf32, #blocked0> + %10 = tt.load %8 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256xf32, #blocked0> + %11 = arith.addf %9, %10 : tensor<256xf32, #blocked0> + %12 = tt.splat %arg2 : (!tt.ptr) -> tensor<256x!tt.ptr, #blocked0> + %13 = tt.addptr %12, %4 : tensor<256x!tt.ptr, #blocked0>, 
tensor<256xi32, #blocked0> + + // Store 8 elements to global with two vectorized store instruction + // CHECK: @$5 st.global.v4.b32 [ ${{.*}} + 0 ], { ${{.*}}, ${{.*}}, ${{.*}}, ${{.*}} }; + // CHECK: @$5 st.global.v4.b32 [ ${{.*}} + 0 ], { ${{.*}}, ${{.*}}, ${{.*}}, ${{.*}} }; + tt.store %13, %11 : tensor<256xf32, #blocked0> + return + } +} + +// TODO: Add a testcase to verify the optimization when ptr of the LoadOp +// is from an addptr with const idx + +// ----- + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [4], order = [0]}> +#blocked2 = #triton_gpu.blocked<{sizePerThread = [1, 1], threadsPerWarp = [32, 1], warpsPerCTA = [4, 1], order = [0, 1]}> +module attributes {"triton_gpu.num-warps" = 4 : i32} { + // CHECK-LABEL: basic_view_broadcast + func @basic_view_broadcast(%arg : tensor<256xf32,#blocked0>) { + // CHECK: llvm.mlir.undef + // CHECK: %[[T0:.*]] = llvm.extractvalue + // CHECK: %[[T1:.*]] = llvm.extractvalue + %0 = tt.view %arg : (tensor<256xf32, #blocked0>) -> tensor<256x1xf32,#blocked2> + // CHECK: llvm.mlir.undef + // CHECK: llvm.insertvalue %[[T0]] + // CHECK: llvm.insertvalue %[[T1]] + // CHECK: llvm.insertvalue %[[T0]] + // CHECK: llvm.insertvalue %[[T1]] + // CHECK: llvm.insertvalue %[[T0]] + // CHECK: llvm.insertvalue %[[T1]] + // CHECK: llvm.insertvalue %[[T0]] + // CHECK: llvm.insertvalue %[[T1]] + %1 = tt.broadcast %0 : (tensor<256x1xf32,#blocked2>) -> tensor<256x4xf32, #blocked2> + return + } +} + +// ----- + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [2], threadsPerWarp = [32], warpsPerCTA = [1], order = [0]}> +module attributes {"triton_gpu.num-warps" = 4 : i32} { + // CHECK-LABEL: basic_make_range + func @basic_make_range() { + // CHECK: nvvm.read.ptx.sreg.tid.x + // CHECK: llvm.mlir.undef + // CHECK: llvm.insertvalue + // CHECK: llvm.insertvalue + %0 = tt.make_range {end = 256 : i32, start = 0 : i32} : tensor<256xi32, #blocked0> + return + } +} + +// ----- + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [4], order = [0]}> +module attributes {"triton_gpu.num-warps" = 4 : i32} { + // CHECK-LABEL: basic_addf + func @basic_addf(%arg0 : tensor<256xf32,#blocked0>, %arg1 : tensor<256xf32,#blocked0>) { + // CHECK: llvm.fadd + // CHECK: llvm.fadd + %1 = arith.addf %arg0, %arg1 : tensor<256xf32,#blocked0> + return + } +} + +// ----- + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [4], order = [0]}> +module attributes {"triton_gpu.num-warps" = 4 : i32} { + // CHECK-LABEL: basic_addi + func @basic_addi(%arg0 : tensor<256xi32,#blocked0>, %arg1 : tensor<256xi32,#blocked0>) { + // CHECK: llvm.add + // CHECK: llvm.add + %1 = arith.addi %arg0, %arg1 : tensor<256xi32,#blocked0> + return + } +} + +// ----- + +module attributes {"triton_gpu.num-warps" = 4 : i32} { + // CHECK-LABEL: basic_program_id + func @basic_program_id() { + // CHECK: nvvm.read.ptx.sreg.ctaid.x : i32 + %0 = tt.get_program_id {axis = 0 : i32} : i32 + return + } +} + +// ----- + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [4], order = [0]}> +module attributes {"triton_gpu.num-warps" = 4 : i32} { + // CHECK-LABEL: basic_addptr + func @basic_addptr(%arg0 : tensor<256x!tt.ptr,#blocked0>, %arg1 : tensor<256xi32,#blocked0>) { + // CHECK: llvm.getelementptr + // CHECK: llvm.getelementptr + %0 = tt.addptr %arg0, %arg1 : tensor<256x!tt.ptr, #blocked0>, tensor<256xi32, #blocked0> + return + } +} + +// ----- + +#shared0 = 
#triton_gpu.shared<{vec = 2, perPhase = 2, maxPhase = 4, order = [1, 0]}> +module attributes {"triton_gpu.num-warps" = 4 : i32} { + // CHECK: llvm.mlir.global external @global_smem + // CHECK-LABEL: basic_alloc_tensor + func @basic_alloc_tensor() { + // CHECK: llvm.mlir.addressof @global_smem + // CHECK-NEXT: llvm.bitcast + // CHECK-NEXT: llvm.mlir.constant + // CHECK-NEXT: llvm.getelementptr + // CHECK-NEXT: llvm.bitcast + %0 = triton_gpu.alloc_tensor : tensor<16x16xf16, #shared0> + return + } +} + +// ----- + +#shared0 = #triton_gpu.shared<{vec = 2, perPhase = 2, maxPhase = 4, order = [1, 0]}> +module attributes {"triton_gpu.num-warps" = 4 : i32} { + // CHECK: llvm.mlir.global external @global_smem + // CHECK-LABEL: basic_extract_slice + func @basic_extract_slice() { + // CHECK: llvm.mlir.addressof @global_smem + // CHECK: llvm.extractvalue + // CHECK-NEXT: llvm.extractvalue + // CHECK-NEXT: llvm.extractvalue + // CHECK-NEXT: llvm.extractvalue + // CHECK-NEXT: llvm.extractvalue + // CHECK-NEXT: llvm.extractvalue + // CHECK-NEXT: llvm.extractvalue + // CHECK-NEXT: llvm.add + // CHECK-NEXT: llvm.mlir.constant(0 : i32) : i32 + // CHECK-NEXT: llvm.add + // CHECK-NEXT: llvm.mlir.constant(0 : i32) : i32 + // CHECK-NEXT: llvm.add + // CHECK-NEXT: llvm.mlir.constant(0 : i32) : i32 + // CHECK-NEXT: llvm.mul + // CHECK-NEXT: llvm.add + // CHECK-NEXT: llvm.mul + // CHECK-NEXT: llvm.add + // CHECK-NEXT: llvm.mul + // CHECK-NEXT: llvm.add + // CHECK-NEXT: llvm.getelementptr + %index = arith.constant 1 : index + %0 = triton_gpu.alloc_tensor : tensor<128x16x32xf32, #shared0> + %1 = tensor.extract_slice %0[%index, 0, 0][1, 16, 32][1, 1, 1] : tensor<128x16x32xf32, #shared0> to tensor<16x32xf32, #shared0> + return + } +} + +// ----- + +module attributes {"triton_gpu.num-warps" = 4 : i32} { + // CHECK-LABEL: basic_async_wait + func @basic_async_wait() { + // CHECK: cp.async.wait_group 0x4 + triton_gpu.async_wait {num = 4: i32} + return + } +} + +// ----- + +#block0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [4], warpsPerCTA = [4], order = [0]}> +#block1 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [8], warpsPerCTA = [4], order = [0]}> +#block2 = #triton_gpu.blocked<{sizePerThread = [1, 1], threadsPerWarp = [4, 1], warpsPerCTA = [4, 1], order = [1, 0]}> +#block3 = #triton_gpu.blocked<{sizePerThread = [1, 1], threadsPerWarp = [1, 8], warpsPerCTA = [1, 4], order = [1, 0]}> +#slice2d1 = #triton_gpu.slice<{dim = 1, parent=#block2}> +#slice3d0 = #triton_gpu.slice<{dim = 0, parent=#block3}> +#AL = #triton_gpu.blocked<{sizePerThread = [1, 8], threadsPerWarp = [4, 8], warpsPerCTA = [4, 1], order = [1, 0]}> +#A = #triton_gpu.shared<{vec = 8, perPhase = 1, maxPhase = 4, order = [1, 0]}> +module attributes {"triton_gpu.num-warps" = 4 : i32} { + // CHECK-LABEL: basic_insert_slice_async_fallback + func @basic_insert_slice_async_fallback(%arg0: !tt.ptr {tt.divisibility = 1 : i32}) { + %off0_ = tt.make_range {end = 16 : i32, start = 0 : i32} : tensor<16xi32, #slice2d1> + %off1_ = tt.make_range {end = 32 : i32, start = 0 : i32} : tensor<64xi32, #slice3d0> + %off0 = tt.expand_dims %off0_ {axis = 1 : i32} : (tensor<16xi32, #slice2d1>) -> tensor<16x1xi32, #block2> + %off1 = tt.expand_dims %off1_ {axis = 0 : i32} : (tensor<64xi32, #slice3d0>) -> tensor<1x64xi32, #block3> + %broadcast_off0_scalar = tt.broadcast %off0 : (tensor<16x1xi32, #block2>) -> tensor<16x64xi32, #block2> + %cst_scalar = arith.constant 64 : i32 + %cst = tt.splat %cst_scalar : (i32) -> tensor<16x64xi32, #block2> + 
%broadcast_off0_ = arith.muli %broadcast_off0_scalar, %cst : tensor<16x64xi32, #block2> + %broadcast_off1_ = tt.broadcast %off1 : (tensor<1x64xi32, #block3>) -> tensor<16x64xi32, #block3> + %broadcast_off0 = triton_gpu.convert_layout %broadcast_off0_ : (tensor<16x64xi32, #block2>) -> tensor<16x64xi32, #AL> + %broadcast_off1 = triton_gpu.convert_layout %broadcast_off1_ : (tensor<16x64xi32, #block3>) -> tensor<16x64xi32, #AL> + %off = arith.addi %broadcast_off0, %broadcast_off1 : tensor<16x64xi32, #AL> + %a_init = tt.splat %arg0 : (!tt.ptr) -> tensor<16x64x!tt.ptr, #AL> + %a_ptr = tt.addptr %a_init, %off : tensor<16x64x!tt.ptr, #AL>, tensor<16x64xi32, #AL> + %tensor = triton_gpu.alloc_tensor : tensor<2x16x64xf16, #A> + %index = arith.constant 1 : i32 + + // CHECK: llvm.load + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: llvm.load + // CHECK-SAME: !llvm.ptr, 3> + %a = triton_gpu.insert_slice_async %a_ptr, %tensor, %index {axis = 0 : i32, cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<16x64x!tt.ptr, #AL> -> tensor<2x16x64xf16, #A> + return + } +} + +// ----- + +#block0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [4], warpsPerCTA = [4], order = [0]}> +#block1 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [8], warpsPerCTA = [4], order = [0]}> +#block2 = #triton_gpu.blocked<{sizePerThread = [1, 1], threadsPerWarp = [4, 1], warpsPerCTA = [4, 1], order = [1, 0]}> +#block3 = #triton_gpu.blocked<{sizePerThread = [1, 1], threadsPerWarp = [1, 8], warpsPerCTA = [1, 4], order = [1, 0]}> +#slice2d1 = #triton_gpu.slice<{dim = 1, parent=#block2}> +#slice3d0 = #triton_gpu.slice<{dim = 0, parent=#block3}> +#AL = #triton_gpu.blocked<{sizePerThread = [1, 8], threadsPerWarp = [4, 8], warpsPerCTA = [4, 1], order = [1, 0]}> +#A = #triton_gpu.shared<{vec = 8, perPhase = 1, maxPhase = 4, order = [1, 0]}> +module attributes {"triton_gpu.num-warps" = 4 : i32} { + // CHECK-LABEL: basic_insert_slice_async_v4 + func @basic_insert_slice_async_v4(%arg0: !tt.ptr {tt.divisibility = 8 : i32}) { + %off0_ = tt.make_range {end = 16 : i32, start = 0 : i32} : tensor<16xi32, #slice2d1> + %off1_ = tt.make_range {end = 32 : i32, start = 0 : i32} : tensor<64xi32, #slice3d0> + %off0 = tt.expand_dims %off0_ {axis = 1 : i32} : (tensor<16xi32, #slice2d1>) -> tensor<16x1xi32, #block2> + %off1 = tt.expand_dims %off1_ {axis = 0 : i32} : (tensor<64xi32, #slice3d0>) -> tensor<1x64xi32, #block3> + %broadcast_off0_scalar = tt.broadcast %off0 : (tensor<16x1xi32, #block2>) -> tensor<16x64xi32, #block2> + %cst_scalar = arith.constant 64 : i32 + %cst = tt.splat %cst_scalar : (i32) -> tensor<16x64xi32, #block2> + %broadcast_off0_ = arith.muli %broadcast_off0_scalar, %cst : tensor<16x64xi32, #block2> + %broadcast_off1_ = tt.broadcast %off1 : (tensor<1x64xi32, #block3>) -> tensor<16x64xi32, #block3> + %broadcast_off0 = triton_gpu.convert_layout %broadcast_off0_ : (tensor<16x64xi32, #block2>) -> tensor<16x64xi32, #AL> + %broadcast_off1 = triton_gpu.convert_layout %broadcast_off1_ : (tensor<16x64xi32, #block3>) -> tensor<16x64xi32, #AL> + %off = arith.addi %broadcast_off0, %broadcast_off1 : tensor<16x64xi32, #AL> + %a_init = tt.splat %arg0 : (!tt.ptr) -> tensor<16x64x!tt.ptr, #AL> + %a_ptr = tt.addptr %a_init, %off : tensor<16x64x!tt.ptr, #AL>, tensor<16x64xi32, #AL> + %tensor = triton_gpu.alloc_tensor : tensor<2x16x64xf32, #A> + %index = arith.constant 1 : i32 + + // CHECK: llvm.inline_asm has_side_effects asm_dialect = att + // CHECK-SAME: cp.async.cg.shared.global [ ${{.*}} + 0 ], [ ${{.*}} + 0 ], 0x10, 0x10 
+ // CHECK: llvm.inline_asm has_side_effects asm_dialect = att + // CHECK-SAME: cp.async.cg.shared.global [ ${{.*}} + 16 ], [ ${{.*}} + 0 ], 0x10, 0x10 + // CHECK: llvm.inline_asm has_side_effects asm_dialect = att + // CHECK-SAME: cp.async.commit_group + %a = triton_gpu.insert_slice_async %a_ptr, %tensor, %index {axis = 0 : i32, cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<16x64x!tt.ptr, #AL> -> tensor<2x16x64xf32, #A> + return + } +} + +// ----- + +#block0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [4], warpsPerCTA = [4], order = [0]}> +#block1 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [8], warpsPerCTA = [4], order = [0]}> +#block2 = #triton_gpu.blocked<{sizePerThread = [1, 1], threadsPerWarp = [4, 1], warpsPerCTA = [4, 1], order = [1, 0]}> +#block3 = #triton_gpu.blocked<{sizePerThread = [1, 1], threadsPerWarp = [1, 8], warpsPerCTA = [1, 4], order = [1, 0]}> +#slice2d1 = #triton_gpu.slice<{dim = 1, parent=#block2}> +#slice3d0 = #triton_gpu.slice<{dim = 0, parent=#block3}> +#AL = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [4, 8], warpsPerCTA = [4, 1], order = [1, 0]}> +#A = #triton_gpu.shared<{vec = 1, perPhase = 1, maxPhase = 4, order = [1, 0]}> +module attributes {"triton_gpu.num-warps" = 4 : i32} { + // CHECK-LABEL: basic_insert_slice_async_v1 + func @basic_insert_slice_async_v1(%arg0: !tt.ptr {tt.divisibility = 4 : i32}) { + %off0_ = tt.make_range {end = 16 : i32, start = 0 : i32} : tensor<16xi32, #slice2d1> + %off1_ = tt.make_range {end = 32 : i32, start = 0 : i32} : tensor<32xi32, #slice3d0> + %off0 = tt.expand_dims %off0_ {axis = 1 : i32} : (tensor<16xi32, #slice2d1>) -> tensor<16x1xi32, #block2> + %off1 = tt.expand_dims %off1_ {axis = 0 : i32} : (tensor<32xi32, #slice3d0>) -> tensor<1x32xi32, #block3> + %broadcast_off0_scalar = tt.broadcast %off0 : (tensor<16x1xi32, #block2>) -> tensor<16x32xi32, #block2> + %cst_scalar = arith.constant 32 : i32 + %cst = tt.splat %cst_scalar : (i32) -> tensor<16x32xi32, #block2> + %broadcast_off0_ = arith.muli %broadcast_off0_scalar, %cst : tensor<16x32xi32, #block2> + %broadcast_off1_ = tt.broadcast %off1 : (tensor<1x32xi32, #block3>) -> tensor<16x32xi32, #block3> + %broadcast_off0 = triton_gpu.convert_layout %broadcast_off0_ : (tensor<16x32xi32, #block2>) -> tensor<16x32xi32, #AL> + %broadcast_off1 = triton_gpu.convert_layout %broadcast_off1_ : (tensor<16x32xi32, #block3>) -> tensor<16x32xi32, #AL> + %off = arith.addi %broadcast_off0, %broadcast_off1 : tensor<16x32xi32, #AL> + %a_init = tt.splat %arg0 : (!tt.ptr) -> tensor<16x32x!tt.ptr, #AL> + %a_ptr = tt.addptr %a_init, %off : tensor<16x32x!tt.ptr, #AL>, tensor<16x32xi32, #AL> + %tensor = triton_gpu.alloc_tensor : tensor<2x16x32xf32, #A> + %index = arith.constant 1 : i32 + + // CHECK: llvm.inline_asm + // CHECK-SAME: cp.async.ca.shared.global [ ${{.*}} + 0 ], [ ${{.*}} + 0 ], 0x4, 0x4 + // CHECK: llvm.inline_asm + // CHECK-SAME: cp.async.ca.shared.global [ ${{.*}} + 0 ], [ ${{.*}} + 0 ], 0x4, 0x4 + // CHECK: llvm.inline_asm + // CHECK-SAME: cp.async.ca.shared.global [ ${{.*}} + 0 ], [ ${{.*}} + 0 ], 0x4, 0x4 + // CHECK: llvm.inline_asm + // CHECK-SAME: cp.async.ca.shared.global [ ${{.*}} + 0 ], [ ${{.*}} + 0 ], 0x4, 0x4 + // CHECK: llvm.inline_asm + // CHECK-SAME: cp.async.commit_group + %a = triton_gpu.insert_slice_async %a_ptr, %tensor, %index {axis = 0 : i32, cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<16x32x!tt.ptr, #AL> -> tensor<2x16x32xf32, #A> + return + } +} + +// ----- + +#block0 = 
#triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [8], warpsPerCTA = [4], order = [0]}> +#block2 = #triton_gpu.blocked<{sizePerThread = [1, 1], threadsPerWarp = [8, 1], warpsPerCTA = [4, 1], order = [1, 0]}> +#block3 = #triton_gpu.blocked<{sizePerThread = [1, 1], threadsPerWarp = [1, 8], warpsPerCTA = [1, 4], order = [1, 0]}> +#slice2d1 = #triton_gpu.slice<{dim = 1, parent=#block2}> +#slice3d0 = #triton_gpu.slice<{dim = 0, parent=#block3}> +#AL = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [4, 8], warpsPerCTA = [4, 1], order = [1, 0]}> +#A = #triton_gpu.shared<{vec = 1, perPhase = 1, maxPhase = 4, order = [1, 0]}> +module attributes {"triton_gpu.num-warps" = 4 : i32} { + // CHECK-LABEL: basic_insert_slice_async_v1_multictas + func @basic_insert_slice_async_v1_multictas(%arg0: !tt.ptr {tt.divisibility = 4 : i32}) { + %off0_ = tt.make_range {end = 32 : i32, start = 0 : i32} : tensor<32xi32, #slice2d1> + %off1_ = tt.make_range {end = 32 : i32, start = 0 : i32} : tensor<32xi32, #slice3d0> + %off0 = tt.expand_dims %off0_ {axis = 1 : i32} : (tensor<32xi32, #slice2d1>) -> tensor<32x1xi32, #block2> + %off1 = tt.expand_dims %off1_ {axis = 0 : i32} : (tensor<32xi32, #slice3d0>) -> tensor<1x32xi32, #block3> + %broadcast_off0_scalar = tt.broadcast %off0 : (tensor<32x1xi32, #block2>) -> tensor<32x32xi32, #block2> + %cst_scalar = arith.constant 32 : i32 + %cst = tt.splat %cst_scalar : (i32) -> tensor<32x32xi32, #block2> + %broadcast_off0_ = arith.muli %broadcast_off0_scalar, %cst : tensor<32x32xi32, #block2> + %broadcast_off1_ = tt.broadcast %off1 : (tensor<1x32xi32, #block3>) -> tensor<32x32xi32, #block3> + %broadcast_off0 = triton_gpu.convert_layout %broadcast_off0_ : (tensor<32x32xi32, #block2>) -> tensor<32x32xi32, #AL> + %broadcast_off1 = triton_gpu.convert_layout %broadcast_off1_ : (tensor<32x32xi32, #block3>) -> tensor<32x32xi32, #AL> + %off = arith.addi %broadcast_off0, %broadcast_off1 : tensor<32x32xi32, #AL> + %a_init = tt.splat %arg0 : (!tt.ptr) -> tensor<32x32x!tt.ptr, #AL> + %a_ptr = tt.addptr %a_init, %off : tensor<32x32x!tt.ptr, #AL>, tensor<32x32xi32, #AL> + %tensor = triton_gpu.alloc_tensor : tensor<2x32x32xf32, #A> + %index = arith.constant 1 : i32 + + // CHECK: llvm.mlir.constant(0 : i32) : i32 + // CHECK: llvm.add + // CHECK: llvm.inline_asm + // CHECK-SAME: cp.async.ca.shared.global [ ${{.*}} + 0 ], [ ${{.*}} + 0 ], 0x4, 0x4 + // CHECK: llvm.mlir.constant(0 : i32) : i32 + // CHECK: llvm.add + // CHECK: llvm.inline_asm + // CHECK-SAME: cp.async.ca.shared.global [ ${{.*}} + 0 ], [ ${{.*}} + 0 ], 0x4, 0x4 + // CHECK: llvm.mlir.constant(0 : i32) : i32 + // CHECK: llvm.add + // CHECK: llvm.inline_asm + // CHECK-SAME: cp.async.ca.shared.global [ ${{.*}} + 0 ], [ ${{.*}} + 0 ], 0x4, 0x4 + // CHECK: llvm.mlir.constant(0 : i32) : i32 + // CHECK: llvm.add + // CHECK: llvm.inline_asm + // CHECK-SAME: cp.async.ca.shared.global [ ${{.*}} + 0 ], [ ${{.*}} + 0 ], 0x4, 0x4 + // CHECK: llvm.mlir.constant(16 : i32) : i32 + // CHECK: llvm.add + // CHECK: llvm.inline_asm + // CHECK-SAME: cp.async.ca.shared.global [ ${{.*}} + 0 ], [ ${{.*}} + 0 ], 0x4, 0x4 + // CHECK: llvm.mlir.constant(16 : i32) : i32 + // CHECK: llvm.add + // CHECK: llvm.inline_asm + // CHECK-SAME: cp.async.ca.shared.global [ ${{.*}} + 0 ], [ ${{.*}} + 0 ], 0x4, 0x4 + // CHECK: llvm.mlir.constant(16 : i32) : i32 + // CHECK: llvm.add + // CHECK: llvm.inline_asm + // CHECK-SAME: cp.async.ca.shared.global [ ${{.*}} + 0 ], [ ${{.*}} + 0 ], 0x4, 0x4 + // CHECK: llvm.mlir.constant(16 : i32) : i32 + // CHECK: 
llvm.add + // CHECK: llvm.inline_asm + // CHECK-SAME: cp.async.ca.shared.global [ ${{.*}} + 0 ], [ ${{.*}} + 0 ], 0x4, 0x4 + // CHECK: llvm.inline_asm + // CHECK-SAME: cp.async.commit_group + %a = triton_gpu.insert_slice_async %a_ptr, %tensor, %index {axis = 0 : i32, cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<32x32x!tt.ptr, #AL> -> tensor<2x32x32xf32, #A> + return + } +} + +// ----- + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [4], order = [0]}> +module attributes {"triton_gpu.num-warps" = 4 : i32} { + // CHECK: basic_splat + func @basic_splat(%ptr: !tt.ptr) { + // CHECK: llvm.mlir.undef + // CHECK: llvm.insertvalue + // CHECK: llvm.insertvalue + %0 = tt.splat %ptr : (!tt.ptr) -> tensor<256x!tt.ptr,#blocked0> + return + } +} + +// ----- + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [4], order = [0]}> +module attributes {"triton_gpu.num-warps" = 4 : i32} { + // CHECK-LABEL: basic_store + func @basic_store(%ptrs: tensor<256x!tt.ptr, #blocked0>, %vals: tensor<256xf32, #blocked0>, %mask: tensor<256xi1, #blocked0>) { + // CHECK: llvm.inline_asm + // CHECK-SAME: st.global.b32 [ ${{.*}} + 0 ], { ${{.*}} }; + // CHECK: llvm.inline_asm + // CHECK-SAME: st.global.b32 [ ${{.*}} + 0 ], { ${{.*}} }; + tt.store %ptrs, %vals, %mask : tensor<256xf32, #blocked0> + return + } +} + +// ----- + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [8, 4], warpsPerCTA = [1, 1], order = [1, 0]}> +#blocked1 = #triton_gpu.blocked<{sizePerThread = [4, 1], threadsPerWarp = [4, 8], warpsPerCTA = [1, 1], order = [0, 1]}> +module attributes {"triton_gpu.num-warps" = 1 : i32} { + // CHECK: llvm.mlir.global external @global_smem() {addr_space = 3 : i32} : !llvm.array<0 x i8> + // CHECK-LABEL: convert_layout_blocked_blocked + func @convert_layout_blocked_blocked(%arg0: tensor<16x16xf32, #blocked0>) { + // CHECK: llvm.mlir.addressof @global_smem + // CHECK: llvm.store + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: llvm.store + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: llvm.store + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: llvm.store + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: llvm.store + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: llvm.store + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: llvm.store + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: llvm.store + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: nvvm.barrier0 + // CHECK: llvm.load + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: llvm.load + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: llvm.load + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: llvm.load + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: llvm.load + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: llvm.load + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: llvm.load + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: llvm.load + // CHECK-SAME: !llvm.ptr, 3> + %0 = triton_gpu.convert_layout %arg0 : (tensor<16x16xf32, #blocked0>) -> tensor<16x16xf32, #blocked1> + return + } +} + +// ----- + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [8, 4], warpsPerCTA = [1, 1], order = [1, 0]}> +#blocked1 = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [16, 2], warpsPerCTA = [1, 1], order = [1, 0]}> +module attributes {"triton_gpu.num-warps" = 1 : i32} { + // CHECK: llvm.mlir.global external @global_smem() {addr_space = 3 : i32} : !llvm.array<0 x i8> + // CHECK-LABEL: convert_layout_blocked_blocked_vec + func @convert_layout_blocked_blocked_vec(%arg0: tensor<16x16xf32, #blocked0>) { + // CHECK: 
llvm.mlir.addressof @global_smem + // CHECK: llvm.store + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: llvm.store + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: nvvm.barrier0 + // CHECK: llvm.load + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: llvm.load + // CHECK-SAME: !llvm.ptr, 3> + %0 = triton_gpu.convert_layout %arg0 : (tensor<16x16xf32, #blocked0>) -> tensor<16x16xf32, #blocked1> + return + } +} + +// ----- + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [8, 4], warpsPerCTA = [1, 1], order = [1, 0]}> +#blocked1 = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [4, 8], warpsPerCTA = [1, 1], order = [1, 0]}> +module attributes {"triton_gpu.num-warps" = 1 : i32} { + // CHECK: llvm.mlir.global external @global_smem() {addr_space = 3 : i32} : !llvm.array<0 x i8> + // CHECK-LABEL: convert_layout_blocked_blocked_multi_rep + func @convert_layout_blocked_blocked_multi_rep(%arg0: tensor<16x16xf32, #blocked0>) { + // CHECK: llvm.mlir.addressof @global_smem + // CHECK: llvm.store + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: nvvm.barrier0 + // CHECK: llvm.load + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: llvm.load + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: nvvm.barrier0 + // CHECK: llvm.store + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: nvvm.barrier0 + // CHECK: llvm.load + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: llvm.load + // CHECK-SAME: !llvm.ptr, 3> + %0 = triton_gpu.convert_layout %arg0 : (tensor<16x16xf32, #blocked0>) -> tensor<16x16xf32, #blocked1> + return + } +} + +// ----- + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [8, 4], warpsPerCTA = [1, 1], order = [1, 0]}> +#shared0 = #triton_gpu.shared<{vec = 1, perPhase=2, maxPhase=8 ,order = [1, 0]}> +#mma0 = #triton_gpu.mma<{versionMajor=2, warpsPerCTA=[1,1]}> +#dot_operand_a = #triton_gpu.dot_op<{opIdx=0, parent=#mma0}> +#dot_operand_b = #triton_gpu.dot_op<{opIdx=1, parent=#mma0}> +module attributes {"triton_gpu.num-warps" = 1 : i32} { + // CHECK-LABEL: convert_dot + func @convert_dot(%A: tensor<16x16xf16, #blocked0>, %B: tensor<16x16xf16, #blocked0>) { + %AA = triton_gpu.convert_layout %A : (tensor<16x16xf16, #blocked0>) -> tensor<16x16xf16, #shared0> + %BB = triton_gpu.convert_layout %B : (tensor<16x16xf16, #blocked0>) -> tensor<16x16xf16, #shared0> + // CHECK: llvm.inline_asm + // CHECK-SAME: ldmatrix.sync.aligned.m8n8.x4 + // CHECK: llvm.inline_asm + // CHECK-SAME: ldmatrix.sync.aligned.m8n8.x4 + %AA_DOT = triton_gpu.convert_layout %AA : (tensor<16x16xf16, #shared0>) -> tensor<16x16xf16, #dot_operand_a> + %BB_DOT = triton_gpu.convert_layout %BB : (tensor<16x16xf16, #shared0>) -> tensor<16x16xf16, #dot_operand_b> + %cst0 = arith.constant dense<0.000000e+00> : tensor<16x16xf32, #mma0> + + // CHECK: llvm.inline_asm + // CHECK-SAME: mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32 + // CHECK: llvm.inline_asm + // CHECK-SAME: mma.sync.aligned.m16n8k16.row.col.f32.f16.f16.f32 + %D = tt.dot %AA_DOT, %BB_DOT, %cst0 {allowTF32 = true, transA = false, transB = false} : tensor<16x16xf16, #dot_operand_a> * tensor<16x16xf16, #dot_operand_b> -> tensor<16x16xf32, #mma0> + + return + } +} + +// TODO: problems in MLIR's parser on slice layout +// #blocked0 = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [8, 4], warpsPerCTA = [1, 1], order = [1, 0]}> +// module attributes {"triton_gpu.num-warps" = 1 : i32} { +// func @make_range_sliced_layout() { +// %0 = tt.make_range {end = 16 : i32, start = 0 : i32} : tensor<16xi32, #triton_gpu.slice<{dim = 0, parent = #blocked0}>> +// return 
+// } +// } + +// ----- + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [32, 1], warpsPerCTA = [1, 4], order = [1, 0]}> +#mma = #triton_gpu.mma<{versionMajor = 2, warpsPerCTA = [2, 2]}> +module attributes {"triton_gpu.num-warps" = 1 : i32} { + // CHECK: llvm.mlir.global external @global_smem() {addr_space = 3 : i32} : !llvm.array<0 x i8> + // CHECK-LABEL: convert_layout_mmav2_block + func @convert_layout_mmav2_blocked(%arg0: tensor<32x16xf32, #mma>) { + // CHECK: llvm.store + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: llvm.store + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: nvvm.barrier0 + // CHECK: llvm.load + // CHECK-SAME: !llvm.ptr, 3> + %0 = triton_gpu.convert_layout %arg0 : (tensor<32x16xf32, #mma>) -> tensor<32x16xf32, #blocked0> + return + } +} + +// ----- + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [32, 1], warpsPerCTA = [1, 4], order = [1, 0]}> +#mma = #triton_gpu.mma<{versionMajor = 1, warpsPerCTA = [2, 1]}> +module attributes {"triton_gpu.num-warps" = 1 : i32} { + // CHECK: llvm.mlir.global external @global_smem() {addr_space = 3 : i32} : !llvm.array<0 x i8> + // CHECK-LABEL: convert_layout_mmav1_block + func @convert_layout_mmav1_blocked(%arg0: tensor<32x16xf32, #mma>) { + // CHECK: llvm.store + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: llvm.store + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: llvm.store + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: llvm.store + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: nvvm.barrier0 + // CHECK: llvm.load + // CHECK-SAME: !llvm.ptr, 3> + %0 = triton_gpu.convert_layout %arg0 : (tensor<32x16xf32, #mma>) -> tensor<32x16xf32, #blocked0> + return + } +} + +// ----- + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [1, 8], threadsPerWarp = [8, 4], warpsPerCTA = [8, 1], order = [1, 0]}> +#shared0 = #triton_gpu.shared<{vec = 8, perPhase = 2, maxPhase = 4, order = [1, 0]}> +module attributes {"triton_gpu.num-warps" = 1 : i32} { + // CHECK: llvm.mlir.global external @global_smem() {addr_space = 3 : i32} : !llvm.array<0 x i8> + // CHECK-LABEL: convert_layout_blocked_shared + func @convert_layout_blocked_shared(%arg0: tensor<128x32xf32, #blocked0>) { + // CHECK: llvm.store + // CHECK-SAME: !llvm.ptr, 3> + // CHECK: llvm.store + // CHECK-SAME: !llvm.ptr, 3> + %0 = triton_gpu.convert_layout %arg0 : (tensor<128x32xf32, #blocked0>) -> tensor<128x32xf32, #shared0> + return + } +} + +// ----- + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [1], order = [0]}> +#blocked1 = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [4, 8], warpsPerCTA = [1, 1], order = [1, 0]}> +module attributes {"triton_gpu.num-warps" = 1 : i32} { + // CHECK-LABEL: convert_blocked1d_to_slice0 + func @convert_blocked1d_to_slice0(%src:tensor<32xi32, #blocked0>) { + // CHECK-COUNT-4: llvm.load {{.*}} : !llvm.ptr, 3> + %cvt = triton_gpu.convert_layout %src : (tensor<32xi32, #blocked0>) -> tensor<32xi32, #triton_gpu.slice<{dim = 0, parent = #blocked1}>> + return + } +} + +// ----- + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [1], order = [0]}> +#blocked1 = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [4, 8], warpsPerCTA = [1, 1], order = [1, 0]}> +module attributes {"triton_gpu.num-warps" = 1 : i32} { + // CHECK-LABEL: convert_blocked1d_to_slice1 + func @convert_blocked1d_to_slice1(%src:tensor<32xi32, #blocked0>) { + // CHECK-COUNT-32: llvm.load {{.*}} : !llvm.ptr, 3> + %cvt = triton_gpu.convert_layout %src : 
(tensor<32xi32, #blocked0>) -> tensor<32xi32, #triton_gpu.slice<{dim = 1, parent = #blocked1}>> + return + } +} + +// ----- + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [1], order = [0]}> +#blocked1 = #triton_gpu.blocked<{sizePerThread = [4], threadsPerWarp = [32], warpsPerCTA = [1], order = [0]}> +module attributes {"triton_gpu.num-warps" = 1 : i32} { + // CHECK-LABEL: convert_blocked_to_blocked_ptr + func @convert_blocked_to_blocked_ptr(%src:tensor<32x!tt.ptr, #blocked0>) { + // CHECK: llvm.ptrtoint + // CHECK: llvm.store + // CHECK: nvvm.barrier0 + // CHECK: llvm.inttoptr + // CHECK-COUNT-4: llvm.insertvalue + %cvt = triton_gpu.convert_layout %src : (tensor<32x!tt.ptr, #blocked0>) -> tensor<32x!tt.ptr, #blocked1> + return + } +} + +// ----- + +#blocked = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [2, 16], warpsPerCTA = [1, 4], order = [1, 0]}> +#shared = #triton_gpu.shared<{vec = 1, perPhase = 1, maxPhase = 1, order = [1, 0]}> +#mma = #triton_gpu.mma<{versionMajor = 2, warpsPerCTA = [2, 2]}> +#dot_operand_a = #triton_gpu.dot_op<{opIdx=0, parent=#mma}> +#dot_operand_b = #triton_gpu.dot_op<{opIdx=1, parent=#mma}> +module attributes {"triton_gpu.num-warps" = 4 : i32} { + func @matmul_kernel_dot_operand_layout(%ptr:!tt.ptr {tt.divisibility = 16 : i32}, + %a:tensor<128x32xf16, #shared>, %b:tensor<32x256xf16, #shared>) { + %cst = arith.constant dense<0.000000e+00> : tensor<128x256xf32, #mma> + // CHECK: ldmatrix.sync.aligned.m8n8.x4.shared.b16 + %a_mat = triton_gpu.convert_layout %a : (tensor<128x32xf16, #shared>) -> tensor<128x32xf16, #dot_operand_a> + %b_mat = triton_gpu.convert_layout %b : (tensor<32x256xf16, #shared>) -> tensor<32x256xf16, #dot_operand_b> + + %28 = tt.dot %a_mat, %b_mat, %cst {allowTF32 = true, transA = false, transB = false} : tensor<128x32xf16, #dot_operand_a> * tensor<32x256xf16, #dot_operand_b> -> tensor<128x256xf32, #mma> + %38 = triton_gpu.convert_layout %28 : (tensor<128x256xf32, #mma>) -> tensor<128x256xf32, #blocked> + + %30 = tt.splat %ptr : (!tt.ptr) -> tensor<128x1x!tt.ptr, #blocked> + %36 = tt.broadcast %30 : (tensor<128x1x!tt.ptr, #blocked>) -> tensor<128x256x!tt.ptr, #blocked> + tt.store %36, %38 : tensor<128x256xf32, #blocked> + return + } +} + +// ----- + +#blocked = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [2, 16], warpsPerCTA = [1, 4], order = [1, 0]}> +#shared = #triton_gpu.shared<{vec = 1, perPhase = 1, maxPhase = 1, order = [1, 0]}> +#mma = #triton_gpu.mma<{versionMajor = 1, warpsPerCTA = [2, 2]}> +#dot_operand_a = #triton_gpu.dot_op<{opIdx=0, parent=#mma, isMMAv1Row=true}> +#dot_operand_b = #triton_gpu.dot_op<{opIdx=1, parent=#mma, isMMAv1Row=true}> +module attributes {"triton_gpu.num-warps" = 4 : i32} { + func @matmul884_kernel_dot_operand_layout(%ptr:!tt.ptr {tt.divisibility = 16 : i32}, + %a:tensor<128x32xf16, #shared>, %b:tensor<32x256xf16, #shared>) { + %cst = arith.constant dense<0.000000e+00> : tensor<128x256xf32, #mma> + // CHECK: ldmatrix.sync.aligned.m8n8.x4.shared.b16 + %a_mat = triton_gpu.convert_layout %a : (tensor<128x32xf16, #shared>) -> tensor<128x32xf16, #dot_operand_a> + %b_mat = triton_gpu.convert_layout %b : (tensor<32x256xf16, #shared>) -> tensor<32x256xf16, #dot_operand_b> + + %28 = tt.dot %a_mat, %b_mat, %cst {allowTF32 = true, transA = false, transB = false} : tensor<128x32xf16, #dot_operand_a> * tensor<32x256xf16, #dot_operand_b> -> tensor<128x256xf32, #mma> + // TODO[goostavz]: uncomment the following lines after convert_layout[mma -> 
blocked] is ready. + // %38 = triton_gpu.convert_layout %28 : (tensor<128x256xf32, #mma>) -> tensor<128x256xf32, #blocked> + // %30 = tt.splat %ptr : (!tt.ptr) -> tensor<128x1x!tt.ptr, #blocked> + // %36 = tt.broadcast %30 : (tensor<128x1x!tt.ptr, #blocked>) -> tensor<128x256x!tt.ptr, #blocked> + // tt.store %36, %38 : tensor<128x256xf32, #blocked> + return + } +} + +// ----- + +#blocked = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [2, 16], warpsPerCTA = [1, 4], order = [1, 0]}> +#shared = #triton_gpu.shared<{vec = 1, perPhase = 1, maxPhase = 1, order = [1, 0]}> +#dot_operand_a = #triton_gpu.dot_op<{opIdx=0, parent=#blocked}> +#dot_operand_b = #triton_gpu.dot_op<{opIdx=1, parent=#blocked}> +module attributes {"triton_gpu.num-warps" = 4 : i32} { + func @matmul_fmadot(%ptr:!tt.ptr {tt.divisibility = 16 : i32}, + %a:tensor<32x16xf32, #shared>, %b:tensor<16x32xf32, #shared>) { + %cst = arith.constant dense<0.000000e+00> : tensor<32x32xf32, #blocked> + // CHECK: llvm.intr.fmuladd + %a_mat = triton_gpu.convert_layout %a : (tensor<32x16xf32, #shared>) -> tensor<32x16xf32, #dot_operand_a> + %b_mat = triton_gpu.convert_layout %b : (tensor<16x32xf32, #shared>) -> tensor<16x32xf32, #dot_operand_b> + + %28 = tt.dot %a_mat, %b_mat, %cst {allowTF32 = false, transA = false, transB = false} : tensor<32x16xf32, #dot_operand_a> * tensor<16x32xf32, #dot_operand_b> -> tensor<32x32xf32, #blocked> + %30 = tt.splat %ptr : (!tt.ptr) -> tensor<32x1x!tt.ptr, #blocked> + %36 = tt.broadcast %30 : (tensor<32x1x!tt.ptr, #blocked>) -> tensor<32x32x!tt.ptr, #blocked> + tt.store %36, %28 : tensor<32x32xf32, #blocked> + return + } +} + +// ----- + +#mma = #triton_gpu.mma<{versionMajor=2, warpsPerCTA=[2, 2]}> +#shared = #triton_gpu.shared<{vec = 1, perPhase = 1, maxPhase = 1, order = [1, 0]}> +#blocked = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [2, 16], warpsPerCTA = [1, 4], order = [1, 0]}> +#dot_operand_a = #triton_gpu.dot_op<{opIdx=0, parent=#mma}> +#dot_operand_b = #triton_gpu.dot_op<{opIdx=1, parent=#mma}> +module attributes {"triton_gpu.num-warps" = 4 : i32} { + // CHECK-LABEL: matmul_tf32dot + func @matmul_tf32dot(%ptr:!tt.ptr {tt.divisibility = 16 : i32}, + %a:tensor<32x16xf32, #shared>, %b:tensor<16x32xf32, #shared>) { + %cst = arith.constant dense<0.000000e+00> : tensor<32x32xf32, #mma> + // CHECK: llvm.inline_asm + // CHECK-SAME: ldmatrix.sync.aligned.m8n8.x4.shared.b16 + // CHECK-SAME: (f32, f32, f32, f32) + // CHECK: llvm.inline_asm + // CHECK-SAME: ldmatrix.sync.aligned.m8n8.x4.shared.b16 + // CHECK-SAME: (f32, f32, f32, f32) + %a_mat = triton_gpu.convert_layout %a : (tensor<32x16xf32, #shared>) -> tensor<32x16xf32, #dot_operand_a> + %b_mat = triton_gpu.convert_layout %b : (tensor<16x32xf32, #shared>) -> tensor<16x32xf32, #dot_operand_b> + + // CHECK: llvm.inline_asm + // CHECK-SAME: mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32 + // CHECK: llvm.inline_asm + // CHECK-SAME: mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32 + // CHECK: llvm.inline_asm + // CHECK-SAME: mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32 + // CHECK: llvm.inline_asm + // CHECK-SAME: mma.sync.aligned.m16n8k8.row.col.f32.tf32.tf32.f32 + %28 = tt.dot %a_mat, %b_mat, %cst {allowTF32 = true, transA = false, transB = false} : tensor<32x16xf32, #dot_operand_a> * tensor<16x32xf32, #dot_operand_b> -> tensor<32x32xf32, #mma> + %38 = triton_gpu.convert_layout %28 : (tensor<32x32xf32, #mma>) -> tensor<32x32xf32, #blocked> + + %30 = tt.splat %ptr : (!tt.ptr) -> tensor<32x1x!tt.ptr, #blocked> + %36 = 
tt.broadcast %30 : (tensor<32x1x!tt.ptr, #blocked>) -> tensor<32x32x!tt.ptr, #blocked> + tt.store %36, %38 : tensor<32x32xf32, #blocked> + return + } +} + +// ----- + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [4], order = [0]}> +module attributes {"triton_gpu.num-warps" = 4 : i32} { + // CHECK-LABEL: atomic_add_f32 + func @atomic_add_f32(%arg0 : tensor<256x!tt.ptr, #blocked0>, %arg1 : tensor<256xi1, #blocked0>, %arg2 : tensor<256xf32, #blocked0>) { + // CHECK: llvm.inline_asm + // CHECK-SAME: atom.global.gpu.add.f32 + %0 = "tt.atomic_rmw" (%arg0, %arg2, %arg1) {atomic_rmw_op = 5 : i32} : (tensor<256x!tt.ptr, #blocked0>, tensor<256xf32, #blocked0>, tensor<256xi1, #blocked0>) -> tensor<256xf32, #blocked0> + return + } +} + +// ----- +#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [4], order = [0]}> +module attributes {"triton_gpu.num-warps" = 4 : i32} { + +func @test_get_program_id(%a: tensor<32x!tt.ptr, #blocked0>) { + %blockidx = tt.get_program_id {axis=0:i32} : i32 + %blockidy = tt.get_program_id {axis=1:i32} : i32 + %blockidz = tt.get_program_id {axis=2:i32} : i32 + // CHECK: nvvm.read.ptx.sreg.ctaid.x + // CHECK: nvvm.read.ptx.sreg.ctaid.y + // CHECK: nvvm.read.ptx.sreg.ctaid.z + %v0 = arith.addi %blockidx, %blockidy : i32 + %v1 = arith.addi %v0, %blockidz : i32 + %0 = tt.splat %v1 : (i32) -> tensor<32xi32, #blocked0> + tt.store %a, %0 : tensor<32xi32, #blocked0> + + return +} + +} + +// ----- +#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [4], order = [0]}> +module attributes {"triton_gpu.num-warps" = 4 : i32} { + +func @test_get_num_program(%a: tensor<32x!tt.ptr, #blocked0>) { + // CHECK: nvvm.read.ptx.sreg.nctaid.x + // CHECK: nvvm.read.ptx.sreg.nctaid.y + // CHECK: nvvm.read.ptx.sreg.nctaid.z + %blockdimx = tt.get_num_programs {axis=0:i32} : i32 + %blockdimy = tt.get_num_programs {axis=1:i32} : i32 + %blockdimz = tt.get_num_programs {axis=2:i32} : i32 + %v0 = arith.addi %blockdimx, %blockdimy : i32 + %v1 = arith.addi %v0, %blockdimz : i32 + %0 = tt.splat %v1 : (i32) -> tensor<32xi32, #blocked0> + tt.store %a, %0 : tensor<32xi32, #blocked0> + + return +} + +} diff --git a/test/Target/tritongpu_to_llvmir.mlir b/test/Target/tritongpu_to_llvmir.mlir new file mode 100644 index 000000000000..cafff3ca608c --- /dev/null +++ b/test/Target/tritongpu_to_llvmir.mlir @@ -0,0 +1,16 @@ +// RUN: %PYTHON -m triton.tools.aot %s --target=llvm-ir --sm=80 | FileCheck %s + +// == LLVM IR check begin == +// CHECK-LABEL: ; ModuleID = 'LLVMDialectModule' +// CHECK: define void @test_empty_kernel +// CHECK: !nvvm.annotations +// CHECK: !{void (i32, half addrspace(1)*)* @test_empty_kernel, !"maxntidx", i32 128} + +module attributes {"triton_gpu.num-warps" = 4 : i32} { + +func @test_empty_kernel(%lb : index, %A : !tt.ptr) { + + return +} + +} diff --git a/test/Target/tritongpu_to_ptx.mlir b/test/Target/tritongpu_to_ptx.mlir new file mode 100644 index 000000000000..404e970a292e --- /dev/null +++ b/test/Target/tritongpu_to_ptx.mlir @@ -0,0 +1,14 @@ +// RUN: %PYTHON -m triton.tools.aot %s --target=ptx --sm=80 --ptx-version=63 | FileCheck %s +// CHECK-LABEL: // Generated by LLVM NVPTX Back-End +// CHECK: .version 6.3 +// CHECK: .target sm_80 +// CHECK: .address_size 64 + +module attributes {"triton_gpu.num-warps" = 4 : i32} { + +func @test_empty_kernel(%lb : index, %A : !tt.ptr) { + + return +} + +} diff --git a/test/Triton/combine.mlir b/test/Triton/combine.mlir new file mode 
100644 index 000000000000..c8c1f29627a0 --- /dev/null +++ b/test/Triton/combine.mlir @@ -0,0 +1,146 @@ +// RUN: triton-opt %s -split-input-file -canonicalize -triton-combine +// RUN: triton-opt %s -split-input-file -canonicalize -triton-combine | FileCheck %s + +// CHECK-LABEL: @test_combine_dot_add_pattern +func @test_combine_dot_add_pattern() -> (tensor<128x128xf32>, tensor<128x128xf32>) { + // CHECK: %[[d:.*]] = arith.constant dense<3.000000e+00> : tensor<128x128xf32> + // CHECK: %[[b:.*]] = arith.constant dense<2.000000e+00> : tensor<128x128xf32> + // CHECK: %[[a:.*]] = arith.constant dense<1.000000e+00> : tensor<128x128xf32> + %a = arith.constant dense<1.0> : tensor<128x128xf32> + %b = arith.constant dense<2.0> : tensor<128x128xf32> + %zero = arith.constant dense<0.0> : tensor<128x128xf32> + %d = arith.constant dense<3.0> : tensor<128x128xf32> + + %dot_out = tt.dot %a, %b, %zero {allowTF32 = true, transA = false, transB = false} : tensor<128x128xf32> * tensor<128x128xf32> -> tensor<128x128xf32> + + // CHECK-NEXT: %[[res0:.*]] = tt.dot %[[a]], %[[b]], %[[d]] {allowTF32 = true} : tensor<128x128xf32> * tensor<128x128xf32> -> tensor<128x128xf32> + %res0 = arith.addf %dot_out, %d : tensor<128x128xf32> + + // CHECK-NEXT: %[[res1:.*]] = tt.dot %[[a]], %[[b]], %[[d]] {allowTF32 = true} : tensor<128x128xf32> * tensor<128x128xf32> -> tensor<128x128xf32> + %res1 = arith.addf %d, %dot_out : tensor<128x128xf32> + + return %res0, %res1 : tensor<128x128xf32>, tensor<128x128xf32> +} + + +// COM: CHECK-LABEL: @test_combine_addptr_pattern +func @test_combine_addptr_pattern(%base: !tt.ptr) -> tensor<8x!tt.ptr> { + %off0 = arith.constant 10 : i32 + %off1 = arith.constant 15 : i32 + + // 10 + 15 = 25 + // COM: CHECK-NEXT: %[[cst:.*]] = arith.constant dense<25> : tensor<8xi32> + + %base_ = tt.broadcast %base : (!tt.ptr) -> tensor<8x!tt.ptr> + + // COM: CHECK-NEXT: %[[tmp0:.*]] = tt.broadcast %{{.*}} : (!tt.ptr) -> tensor<8x!tt.ptr> + + %idx0 = tt.broadcast %off0 : (i32) -> tensor<8xi32> + %idx1 = tt.broadcast %off1 : (i32) -> tensor<8xi32> + + // COM: CHECK-NEXT: %1 = tt.addptr %[[tmp0]], %[[cst]] : tensor<8x!tt.ptr>, tensor<8xi32> + %ptr0 = tt.addptr %base_, %idx0 : tensor<8x!tt.ptr>, tensor<8xi32> + %ptr1 = tt.addptr %ptr0, %idx1 : tensor<8x!tt.ptr>, tensor<8xi32> + + return %ptr1 : tensor<8x!tt.ptr> +} + + +// CHECK-LABEL: @test_combine_select_masked_load_pattern +func @test_combine_select_masked_load_pattern(%ptr: tensor<8x!tt.ptr>, %cond: i1) -> (tensor<8xf32>, tensor<8xf32>) { + %mask = tt.broadcast %cond : (i1) -> tensor<8xi1> + %false_val = arith.constant dense<0.0> : tensor<8xf32> + + // CHECK: %[[res1:.*]] = tt.load %{{.*}}, %{{.*}}, %{{.*}} {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<8xf32> + %x = tt.load %ptr, %mask, %false_val {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<8xf32> + %0 = select %cond, %x, %false_val : tensor<8xf32> + + // CHECK: %[[res2:.*]] = tt.load %{{.*}}, %{{.*}}, %{{.*}} {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<8xf32> + %y = tt.load %ptr, %mask, %false_val {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<8xf32> + %1 = select %cond, %y, %false_val : tensor<8xf32> + + // CHECK: return %[[res1]], %[[res2]] : tensor<8xf32>, tensor<8xf32> + return %0, %1 : tensor<8xf32>, tensor<8xf32> +} + +// CHECK-LABEL: @test_combine_select_masked_load_fail_pattern +func @test_combine_select_masked_load_fail_pattern(%ptr: tensor<8x!tt.ptr>, %dummy_load: tensor<8xf32>, %dummy_broadcast: tensor<8xi1>, %cond: i1) -> 
(tensor<8xf32>, tensor<8xf32>) { + %false_val = arith.constant dense<0.0> : tensor<8xf32> + + // Case 1: value at the "load" position is not an "op". Select should not be canonicalized. + // CHECK: %{{.*}} = select %{{.*}}, %{{.*}}, %{{.*}} : tensor<8xf32> + %0 = select %cond, %dummy_load, %false_val : tensor<8xf32> + + // Case 2: value at the "broadcast" position is not an "op". Select should not be canonicalized. + %real_load = tt.load %ptr, %dummy_broadcast, %false_val {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<8xf32> + // CHECK: %{{.*}} = select %{{.*}}, %{{.*}}, %{{.*}} : tensor<8xf32> + %1 = select %cond, %real_load, %false_val : tensor<8xf32> + + return %0, %1 : tensor<8xf32>, tensor<8xf32> +} + +// CHECK-LABEL: @test_combine_broadcast_constant_pattern +func @test_combine_broadcast_constant_pattern(%cst : f32) -> tensor<8x2xf32> { + // CHECK: %[[cst:.*]] = arith.constant dense<1.000000e+00> : tensor<8x2xf32> + %const = arith.constant dense<1.0> : tensor<8xf32> + %bst_out = tt.broadcast %const : (tensor<8xf32>) -> tensor<8x2xf32> + + // CHECK-NEXT: return %[[cst]] : tensor<8x2xf32> + return %bst_out : tensor<8x2xf32> +} + +// CHECK-LABEL: @test_canonicalize_masked_load_pattern +func @test_canonicalize_masked_load_pattern(%ptr: tensor<8x!tt.ptr>) -> (tensor<8xf32>, tensor<8xf32>, tensor<8xf32>) { + %true_mask = arith.constant dense : tensor<8xi1> + %false_mask = arith.constant dense : tensor<8xi1> + %other_val = arith.constant dense<0.0> : tensor<8xf32> + + // true_mask with other + // CHECK: %[[res1:.*]] = tt.load %{{.*}} {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<8xf32> + %x = tt.load %ptr, %true_mask {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<8xf32> + + // true_mask without other + // CHECK: %[[res2:.*]] = tt.load %{{.*}} {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<8xf32> + %y = tt.load %ptr, %true_mask, %other_val {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<8xf32> + + // false_mask with other. It should become "other" (i.e., %y) + %z = tt.load %ptr, %false_mask, %y {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<8xf32> + + // CHECK: return %[[res1]], %[[res2]], %[[res2]] : tensor<8xf32>, tensor<8xf32>, tensor<8xf32> + return %x, %y, %z: tensor<8xf32>, tensor<8xf32>, tensor<8xf32> +} + +// CHECK-LABEL: @test_canonicalize_masked_load_fail_pattern +func @test_canonicalize_masked_load_fail_pattern(%ptr: tensor<8x!tt.ptr>, %mask: tensor<8xi1>) -> (tensor<8xf32>, tensor<8xf32>) { + %other_val = arith.constant dense<0.0> : tensor<8xf32> + + // Case: value at the "mask" position is not an "op". Load should not be canonicalized. 
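+  // (Presumably: because %mask is a function argument rather than a constant op, the pattern cannot prove it is
+  //  all-true or all-false, so both masked loads below must be left untouched.)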
+ // CHECK: %[[res1:.*]] = tt.load %{{.*}}, %{{.*}} {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<8xf32> + %x = tt.load %ptr, %mask {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<8xf32> + // CHECK: %[[res1:.*]] = tt.load %{{.*}}, %{{.*}}, %{{.*}} {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<8xf32> + %y = tt.load %ptr, %mask, %other_val {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<8xf32> + + return %x, %y: tensor<8xf32>, tensor<8xf32> +} + +// CHECK-LABEL: @test_canonicalize_masked_store_pattern +func @test_canonicalize_masked_store_pattern(%ptr: tensor<8x!tt.ptr>, %val: tensor<8xf32>) { + %true_mask = arith.constant dense : tensor<8xi1> + %false_mask = arith.constant dense : tensor<8xi1> + + // CHECK: tt.store %{{.*}}, %{{.*}} : tensor<8xf32> + tt.store %ptr, %val, %true_mask : tensor<8xf32> + + // The following store should disappear. + // CHECK-NEXT: return + tt.store %ptr, %val, %false_mask : tensor<8xf32> + return +} + +// CHECK-LABEL: @test_canonicalize_masked_store_fail_pattern +func @test_canonicalize_masked_store_fail_pattern(%ptr: tensor<8x!tt.ptr>, %val: tensor<8xf32>, %mask: tensor<8xi1>) { + // Case: value at the "mask" position is not an "op". Store should not be canonicalized. + // CHECK: tt.store %{{.*}}, %{{.*}}, %{{.*}} : tensor<8xf32> + tt.store %ptr, %val, %mask : tensor<8xf32> + return +} diff --git a/test/Triton/vecadd.mlir b/test/Triton/vecadd.mlir new file mode 100644 index 000000000000..0b69ef3054e8 --- /dev/null +++ b/test/Triton/vecadd.mlir @@ -0,0 +1,130 @@ +// RUN: triton-opt %s -verify-diagnostics + +module { + func @add_kernel__Pfp32_Pfp32_Pfp32_i32_i32_i32__(%arg0: !tt.ptr, %arg1: !tt.ptr, %arg2: !tt.ptr, %arg3: i32, %arg4: i32, %arg5: i32) { + %0 = tt.get_program_id {axis = 0 : i32} : i32 + %c256_i32 = arith.constant 256 : i32 + %1 = arith.muli %0, %c256_i32 : i32 + %2 = tt.make_range {end = 256 : i32, start = 0 : i32} : tensor<256xi32> + %3 = tt.broadcast %1 : (i32) -> tensor<256xi32> + %4 = arith.addi %3, %2 : tensor<256xi32> + %5 = tt.broadcast %arg3 : (i32) -> tensor<256xi32> + %6 = arith.cmpi slt, %4, %5 : tensor<256xi32> + %7 = tt.broadcast %arg0 : (!tt.ptr) -> tensor<256x!tt.ptr> + %8 = tt.addptr %7, %4 : tensor<256x!tt.ptr>, tensor<256xi32> + %9 = tt.broadcast %arg1 : (!tt.ptr) -> tensor<256x!tt.ptr> + %10 = tt.addptr %9, %4 : tensor<256x!tt.ptr>, tensor<256xi32> + %cst = arith.constant 0.000000e+00 : f32 + %11 = tt.broadcast %cst : (f32) -> tensor<256xf32> + %c0_i32 = arith.constant 0 : i32 + %c32_i32 = arith.constant 32 : i32 + %12 = arith.index_cast %c0_i32 : i32 to index + %13 = arith.index_cast %arg4 : i32 to index + %14 = arith.index_cast %c32_i32 : i32 to index + %15:3 = scf.for %arg6 = %12 to %13 step %14 iter_args(%arg7 = %11, %arg8 = %8, %arg9 = %10) -> (tensor<256xf32>, tensor<256x!tt.ptr>, tensor<256x!tt.ptr>) { + %cst_0 = arith.constant 0.000000e+00 : f32 + %18 = tt.broadcast %cst_0 : (f32) -> tensor<256xf32> + %19 = tt.load %arg8, %6, %18 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256xf32> + %cst_1 = arith.constant 0.000000e+00 : f32 + %20 = tt.broadcast %cst_1 : (f32) -> tensor<256xf32> + %21 = tt.load %arg9, %6, %20 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256xf32> + %22 = arith.addf %19, %21 : tensor<256xf32> + %23 = arith.addf %arg7, %22 : tensor<256xf32> + %24 = tt.broadcast %arg5 : (i32) -> tensor<256xi32> + %25 = tt.addptr %arg8, %24 : tensor<256x!tt.ptr>, tensor<256xi32> + %26 = tt.broadcast %arg5 : (i32) -> 
tensor<256xi32> + %27 = tt.addptr %arg9, %26 : tensor<256x!tt.ptr>, tensor<256xi32> + scf.yield %23, %25, %27 : tensor<256xf32>, tensor<256x!tt.ptr>, tensor<256x!tt.ptr> + } + %16 = tt.broadcast %arg2 : (!tt.ptr) -> tensor<256x!tt.ptr> + %17 = tt.addptr %16, %4 : tensor<256x!tt.ptr>, tensor<256xi32> + tt.store %17, %15#0, %6 : tensor<256xf32> + return + } +} +// module { +// func @add_kernel__Pfp32_Pfp32_Pfp32_i32_i32_i32__(%arg0: !tt.ptr, %arg1: !tt.ptr, %arg2: !tt.ptr, %arg3: i32, %arg4: i32, %arg5: i32) { +// %c64 = arith.constant 64 : index +// %c32 = arith.constant 32 : index +// %c0 = arith.constant 0 : index +// %cst = arith.constant 0.000000e+00 : f32 +// %c256_i32 = arith.constant 256 : i32 +// %0 = tt.get_program_id {axis = 0 : i32} : i32 +// %1 = arith.muli %0, %c256_i32 : i32 +// %2 = tt.make_range {end = 256 : i32, start = 0 : i32} : tensor<256xi32, #triton_gpu<"coalesced encoding">> +// %3 = tt.broadcast %1 : (i32) -> tensor<256xi32, #triton_gpu<"coalesced encoding">> +// %4 = arith.addi %3, %2 : tensor<256xi32, #triton_gpu<"coalesced encoding">> +// %5 = tt.broadcast %arg3 : (i32) -> tensor<256xi32, #triton_gpu<"coalesced encoding">> +// %6 = "triton_gpu.cmpi"(%4, %5) {predicate = 2 : i64} : (tensor<256xi32, #triton_gpu<"coalesced encoding">>, tensor<256xi32, #triton_gpu<"coalesced encoding">>) -> tensor<256xi1, #triton_gpu<"coalesced encoding">> +// %7 = tt.broadcast %arg0 : (!tt.ptr) -> tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">> +// %8 = tt.addptr %7, %4, : tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">>, tensor<256xi32> +// %9 = tt.broadcast %arg1 : (!tt.ptr) -> tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">> +// %10 = tt.addptr %9, %4, : tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">>, tensor<256xi32> +// %11 = tt.broadcast %cst : (f32) -> tensor<256xf32, #triton_gpu<"coalesced encoding">> +// %12 = arith.index_cast %arg4 : i32 to index +// %13 = arith.cmpi slt, %c0, %12 : index +// %14 = tt.broadcast %cst : (f32) -> tensor<256xf32, #triton_gpu<"coalesced encoding">> +// %15 = tt.broadcast %13 : (i1) -> tensor<256xi1, #triton_gpu<"coalesced encoding">> +// %16 = arith.andi %6, %15 : tensor<256xi1, #triton_gpu<"coalesced encoding">> +// %17 = triton_gpu.copy_async %8, %16, %14 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">> -> tensor<256xf32, #triton_gpu<"coalesced encoding">> +// %18 = tt.broadcast %cst : (f32) -> tensor<256xf32, #triton_gpu<"coalesced encoding">> +// %19 = tt.broadcast %13 : (i1) -> tensor<256xi1, #triton_gpu<"coalesced encoding">> +// %20 = arith.andi %6, %19 : tensor<256xi1, #triton_gpu<"coalesced encoding">> +// %21 = triton_gpu.copy_async %10, %20, %18 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">> -> tensor<256xf32, #triton_gpu<"coalesced encoding">> +// %22 = tt.broadcast %arg5 : (i32) -> tensor<256xi32, #triton_gpu<"coalesced encoding">> +// %23 = tt.addptr %8, %22, : tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">>, tensor<256xi32> +// %24 = tt.broadcast %arg5 : (i32) -> tensor<256xi32, #triton_gpu<"coalesced encoding">> +// %25 = tt.addptr %10, %24, : tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">>, tensor<256xi32> +// %26 = arith.cmpi slt, %c32, %12 : index +// %27 = tt.broadcast %cst : (f32) -> tensor<256xf32, #triton_gpu<"coalesced encoding">> +// %28 = tt.broadcast %26 : (i1) -> tensor<256xi1, #triton_gpu<"coalesced encoding">> +// %29 = arith.andi %6, %28 : 
tensor<256xi1, #triton_gpu<"coalesced encoding">> +// %30 = triton_gpu.copy_async %23, %29, %27 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">> -> tensor<256xf32, #triton_gpu<"coalesced encoding">> +// %31 = tt.broadcast %cst : (f32) -> tensor<256xf32, #triton_gpu<"coalesced encoding">> +// %32 = tt.broadcast %26 : (i1) -> tensor<256xi1, #triton_gpu<"coalesced encoding">> +// %33 = arith.andi %6, %32 : tensor<256xi1, #triton_gpu<"coalesced encoding">> +// %34 = triton_gpu.copy_async %25, %33, %31 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">> -> tensor<256xf32, #triton_gpu<"coalesced encoding">> +// %35 = tt.broadcast %arg5 : (i32) -> tensor<256xi32, #triton_gpu<"coalesced encoding">> +// %36 = tt.addptr %23, %35, : tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">>, tensor<256xi32> +// %37 = tt.broadcast %arg5 : (i32) -> tensor<256xi32, #triton_gpu<"coalesced encoding">> +// %38 = tt.addptr %25, %37, : tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">>, tensor<256xi32> +// %39 = arith.cmpi slt, %c64, %12 : index +// %40 = tt.broadcast %cst : (f32) -> tensor<256xf32, #triton_gpu<"coalesced encoding">> +// %41 = tt.broadcast %39 : (i1) -> tensor<256xi1, #triton_gpu<"coalesced encoding">> +// %42 = arith.andi %6, %41 : tensor<256xi1, #triton_gpu<"coalesced encoding">> +// %43 = triton_gpu.copy_async %36, %42, %40 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">> -> tensor<256xf32, #triton_gpu<"coalesced encoding">> +// %44 = tt.broadcast %cst : (f32) -> tensor<256xf32, #triton_gpu<"coalesced encoding">> +// %45 = tt.broadcast %39 : (i1) -> tensor<256xi1, #triton_gpu<"coalesced encoding">> +// %46 = arith.andi %6, %45 : tensor<256xi1, #triton_gpu<"coalesced encoding">> +// %47 = triton_gpu.copy_async %38, %46, %44 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">> -> tensor<256xf32, #triton_gpu<"coalesced encoding">> +// %48 = tt.broadcast %arg5 : (i32) -> tensor<256xi32, #triton_gpu<"coalesced encoding">> +// %49 = tt.addptr %36, %48, : tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">>, tensor<256xi32> +// %50 = tt.broadcast %arg5 : (i32) -> tensor<256xi32, #triton_gpu<"coalesced encoding">> +// %51 = tt.addptr %38, %50, : tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">>, tensor<256xi32> +// %52:12 = scf.for %arg6 = %c0 to %12 step %c32 iter_args(%arg7 = %11, %arg8 = %8, %arg9 = %10, %arg10 = %17, %arg11 = %30, %arg12 = %43, %arg13 = %21, %arg14 = %34, %arg15 = %47, %arg16 = %51, %arg17 = %49, %arg18 = %c64) -> (tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">>, tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">>, tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">>, tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">>, index) { +// %55 = arith.addf %arg10, %arg13 : tensor<256xf32, #triton_gpu<"coalesced encoding">> +// %56 = arith.addf %arg7, %55 : tensor<256xf32, #triton_gpu<"coalesced encoding">> +// %57 = tt.broadcast %arg5 : (i32) -> tensor<256xi32, 
#triton_gpu<"coalesced encoding">> +// %58 = tt.addptr %arg8, %57, : tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">>, tensor<256xi32> +// %59 = tt.broadcast %arg5 : (i32) -> tensor<256xi32, #triton_gpu<"coalesced encoding">> +// %60 = tt.addptr %arg9, %59, : tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">>, tensor<256xi32> +// %61 = arith.addi %arg18, %c32 : index +// %62 = arith.cmpi slt, %61, %12 : index +// %63 = tt.broadcast %cst : (f32) -> tensor<256xf32, #triton_gpu<"coalesced encoding">> +// %64 = tt.broadcast %62 : (i1) -> tensor<256xi1, #triton_gpu<"coalesced encoding">> +// %65 = arith.andi %64, %6 : tensor<256xi1, #triton_gpu<"coalesced encoding">> +// %66 = triton_gpu.copy_async %arg17, %65, %63 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">> -> tensor<256xf32, #triton_gpu<"coalesced encoding">> +// %67 = tt.broadcast %cst : (f32) -> tensor<256xf32, #triton_gpu<"coalesced encoding">> +// %68 = triton_gpu.copy_async %arg16, %65, %67 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">> -> tensor<256xf32, #triton_gpu<"coalesced encoding">> +// %69 = tt.broadcast %arg5 : (i32) -> tensor<256xi32, #triton_gpu<"coalesced encoding">> +// %70 = tt.addptr %arg17, %69, : tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">>, tensor<256xi32> +// %71 = tt.broadcast %arg5 : (i32) -> tensor<256xi32, #triton_gpu<"coalesced encoding">> +// %72 = tt.addptr %arg16, %71, : tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">>, tensor<256xi32> +// scf.yield %56, %58, %60, %arg11, %arg12, %66, %arg14, %arg15, %68, %72, %70, %61 : tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">>, tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">>, tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256xf32, #triton_gpu<"coalesced encoding">>, tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">>, tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">>, index +// } +// %53 = tt.broadcast %arg2 : (!tt.ptr) -> tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">> +// %54 = tt.addptr %53, %4, : tensor<256x!tt.ptr, #triton_gpu<"coalesced encoding">>, tensor<256xi32> +// tt.store %54, %52#0, %6 : tensor<256xf32, #triton_gpu<"coalesced encoding">> +// return +// } +// } diff --git a/test/TritonGPU/coalesce.mlir b/test/TritonGPU/coalesce.mlir new file mode 100644 index 000000000000..60e359f5277d --- /dev/null +++ b/test/TritonGPU/coalesce.mlir @@ -0,0 +1,53 @@ +// RUN: triton-opt %s -split-input-file -tritongpu-coalesce -canonicalize | FileCheck %s + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [4], order = [0]}> +#blocked1 = #triton_gpu.blocked<{sizePerThread = [1, 1], threadsPerWarp = [32, 1], warpsPerCTA = [4, 1], order = [0, 1]}> +#blocked2 = #triton_gpu.blocked<{sizePerThread = [1, 1], threadsPerWarp = [1, 32], warpsPerCTA = [1, 4], order = [0, 1]}> +#slice1dim1 = #triton_gpu.slice<{dim = 1, parent = #blocked1}> +#slice2dim0 = #triton_gpu.slice<{dim = 0, parent = #blocked2}> + +module attributes {"triton_gpu.num-warps" = 4 : i32} { + + +// CHECK: [[row_layout:#.*]] = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [2, 16], warpsPerCTA = [4, 1], order = 
[1, 0]}> +// CHECK: [[col_layout:#.*]] = #triton_gpu.blocked<{sizePerThread = [4, 1], threadsPerWarp = [16, 2], warpsPerCTA = [1, 4], order = [0, 1]}> +// CHECK: [[load_ptr:%.*]] = triton_gpu.convert_layout {{.*}} -> tensor<64x64x!tt.ptr, [[row_layout]]> +// CHECK: [[load_mask:%.*]] = triton_gpu.convert_layout {{.*}} -> tensor<64x64xi1, [[row_layout]]> +// CHECK: [[load_other:%.*]] = triton_gpu.convert_layout {{.*}} -> tensor<64x64xf32, [[row_layout]]> +// CHECK: [[load_val:%.*]] = tt.load [[load_ptr]], [[load_mask]], [[load_other]] {{.*}} : tensor<64x64xf32, [[row_layout]]> +// CHECK: [[store_ptr:%.*]] = triton_gpu.convert_layout {{.*}} -> tensor<64x64x!tt.ptr, [[col_layout]]> +// CHECK: [[store_val:%.*]] = triton_gpu.convert_layout {{.*}} -> tensor<64x64xf32, [[col_layout]]> +// CHECK: [[store_mask:%.*]] = triton_gpu.convert_layout {{.*}} -> tensor<64x64xi1, [[col_layout]]> +// CHECK: tt.store [[store_ptr]], [[store_val]], [[store_mask]] +func @transpose(%arg0: !tt.ptr {tt.divisibility = 16 : i32}, + %arg1: i32 {tt.divisibility = 16 : i32}, + %arg2: !tt.ptr {tt.divisibility = 16 : i32}, + %arg3: i32 {tt.divisibility = 16 : i32}) { + %cst = arith.constant dense : tensor<64x64xi1, #blocked1> + %cst_0 = arith.constant dense<0.000000e+00> : tensor<64x64xf32, #blocked1> + %00 = tt.make_range {end = 64 : i32, start = 0 : i32} : tensor<64xi32, #slice1dim1> + %01 = tt.make_range {end = 64 : i32, start = 0 : i32} : tensor<64xi32, #slice2dim0> + %1 = tt.expand_dims %00 {axis = 1 : i32} : (tensor<64xi32, #slice1dim1>) -> tensor<64x1xi32, #blocked1> + %2 = tt.splat %arg1 : (i32) -> tensor<64x1xi32, #blocked1> + %3 = arith.muli %1, %2 : tensor<64x1xi32, #blocked1> + %4 = tt.splat %arg0 : (!tt.ptr) -> tensor<64x1x!tt.ptr, #blocked1> + %5 = tt.addptr %4, %3 : tensor<64x1x!tt.ptr, #blocked1>, tensor<64x1xi32, #blocked1> + %6 = tt.expand_dims %01 {axis = 0 : i32} : (tensor<64xi32, #slice2dim0>) -> tensor<1x64xi32, #blocked2> + %7 = tt.broadcast %5 : (tensor<64x1x!tt.ptr, #blocked1>) -> tensor<64x64x!tt.ptr, #blocked1> + %8 = tt.broadcast %6 : (tensor<1x64xi32, #blocked2>) -> tensor<64x64xi32, #blocked2> + %9 = triton_gpu.convert_layout %8 : (tensor<64x64xi32, #blocked2>) -> tensor<64x64xi32, #blocked1> + %10 = tt.addptr %7, %9 : tensor<64x64x!tt.ptr, #blocked1>, tensor<64x64xi32, #blocked1> + %11 = tt.splat %arg2 : (!tt.ptr) -> tensor<64x1x!tt.ptr, #blocked1> + %12 = tt.addptr %11, %1 : tensor<64x1x!tt.ptr, #blocked1>, tensor<64x1xi32, #blocked1> + %13 = tt.splat %arg3 : (i32) -> tensor<1x64xi32, #blocked2> + %14 = arith.muli %6, %13 : tensor<1x64xi32, #blocked2> + %15 = tt.broadcast %12 : (tensor<64x1x!tt.ptr, #blocked1>) -> tensor<64x64x!tt.ptr, #blocked1> + %16 = tt.broadcast %14 : (tensor<1x64xi32, #blocked2>) -> tensor<64x64xi32, #blocked2> + %17 = triton_gpu.convert_layout %16 : (tensor<64x64xi32, #blocked2>) -> tensor<64x64xi32, #blocked1> + %18 = tt.addptr %15, %17 : tensor<64x64x!tt.ptr, #blocked1>, tensor<64x64xi32, #blocked1> + %19 = tt.load %10, %cst, %cst_0 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<64x64xf32, #blocked1> + tt.store %18, %19, %cst : tensor<64x64xf32, #blocked1> + return +} + +} \ No newline at end of file diff --git a/test/TritonGPU/combine.mlir b/test/TritonGPU/combine.mlir new file mode 100644 index 000000000000..b4d2da376bb1 --- /dev/null +++ b/test/TritonGPU/combine.mlir @@ -0,0 +1,186 @@ +// RUN: triton-opt %s -tritongpu-combine 2>&1 | FileCheck %s + +#layout0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [4], order 
= [0]}> +#layout1 = #triton_gpu.blocked<{sizePerThread = [4], threadsPerWarp = [32], warpsPerCTA = [4], order = [0]}> + +// CHECK: [[target_layout:#.*]] = #triton_gpu.blocked<{sizePerThread = [4], threadsPerWarp = [32], warpsPerCTA = [4], order = [0]}> +// CHECK: [[row_layout:#.*]] = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [2, 16], warpsPerCTA = [1, 4], order = [1, 0]}> +// CHECK: [[col_layout:#.*]] = #triton_gpu.blocked<{sizePerThread = [4, 1], threadsPerWarp = [16, 2], warpsPerCTA = [4, 1], order = [0, 1]}> +// CHECK: [[col_layout_novec:#.*]] = #triton_gpu.blocked<{sizePerThread = [1, 1], threadsPerWarp = [32, 1], warpsPerCTA = [4, 1], order = [0, 1]}> + +func @cst() -> tensor<1024xi32, #layout1> { + %cst = arith.constant dense<0> : tensor<1024xi32, #layout0> + %1 = triton_gpu.convert_layout %cst : (tensor<1024xi32, #layout0>) -> tensor<1024xi32, #layout1> + // CHECK-NOT: triton_gpu.convert_layout + // CHECK: return %cst : tensor<1024xi32, [[target_layout]]> + return %1: tensor<1024xi32, #layout1> +} + +func @range() -> tensor<1024xi32, #layout1> { + %0 = tt.make_range {end = 1024 : i32, start = 0 : i32} : tensor<1024xi32, #layout0> + %1 = triton_gpu.convert_layout %0 : (tensor<1024xi32, #layout0>) -> tensor<1024xi32, #layout1> + // CHECK-NOT: triton_gpu.convert_layout + // CHECK: return %0 : tensor<1024xi32, [[target_layout]]> + return %1: tensor<1024xi32, #layout1> +} + +func @splat(%arg0: i32) -> tensor<1024xi32, #layout1> { + %0 = tt.splat %arg0 : (i32) -> tensor<1024xi32, #layout0> + %1 = triton_gpu.convert_layout %0 : (tensor<1024xi32, #layout0>) -> tensor<1024xi32, #layout1> + // CHECK-NOT: triton_gpu.convert_layout + // CHECK: return %0 : tensor<1024xi32, [[target_layout]]> + return %1: tensor<1024xi32, #layout1> +} + +func @remat(%arg0: i32) -> tensor<1024xi32, #layout1> { + %0 = tt.make_range {end = 1024 : i32, start = 0 : i32} : tensor<1024xi32, #layout0> + %1 = tt.make_range {end = 1024 : i32, start = 0 : i32} : tensor<1024xi32, #layout0> + %2 = arith.muli %0, %1 : tensor<1024xi32, #layout0> + %3 = triton_gpu.convert_layout %2 : (tensor<1024xi32, #layout0>) -> tensor<1024xi32, #layout1> + %4 = tt.splat %arg0 : (i32) -> tensor<1024xi32, #layout0> + %5 = triton_gpu.convert_layout %2 : (tensor<1024xi32, #layout0>) -> tensor<1024xi32, #layout1> + %6 = arith.addi %3, %5 : tensor<1024xi32, #layout1> + return %6: tensor<1024xi32, #layout1> + // CHECK: %0 = tt.make_range {end = 1024 : i32, start = 0 : i32} : tensor<1024xi32, [[target_layout]]> + // CHECK: %1 = tt.make_range {end = 1024 : i32, start = 0 : i32} : tensor<1024xi32, [[target_layout]]> + // CHECK: %2 = tt.make_range {end = 1024 : i32, start = 0 : i32} : tensor<1024xi32, [[target_layout]]> + // CHECK: %3 = tt.make_range {end = 1024 : i32, start = 0 : i32} : tensor<1024xi32, [[target_layout]]> + // CHECK: %4 = arith.muli %0, %2 : tensor<1024xi32, [[target_layout]]> + // CHECK: %5 = arith.muli %1, %3 : tensor<1024xi32, [[target_layout]]> + // CHECK: %6 = arith.addi %4, %5 : tensor<1024xi32, [[target_layout]]> + // CHECK: return %6 : tensor<1024xi32, [[target_layout]]> +} + +#blocked0 = #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [4], order = [0]}> +#blocked1 = #triton_gpu.blocked<{sizePerThread = [1, 1], threadsPerWarp = [32, 1], warpsPerCTA = [4, 1], order = [0, 1]}> +#slice1dim1 = #triton_gpu.slice<{dim = 1, parent = #blocked1}> +#blocked2 = #triton_gpu.blocked<{sizePerThread = [1, 1], threadsPerWarp = [1, 32], warpsPerCTA = [1, 4], order = [0, 1]}> +#slice2dim0 = 
#triton_gpu.slice<{dim = 0, parent = #blocked2}> +#blocked3 = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [2, 16], warpsPerCTA = [1, 4], order = [1, 0]}> +#blocked4 = #triton_gpu.blocked<{sizePerThread = [4, 1], threadsPerWarp = [16, 2], warpsPerCTA = [4, 1], order = [0, 1]}> + +// CHECK-LABEL: transpose +func @transpose(%arg0: !tt.ptr {tt.divisibility = 16 : i32}, %arg1: i32 {tt.divisibility = 16 : i32}, %arg2: !tt.ptr {tt.divisibility = 16 : i32}, %arg3: i32 {tt.divisibility = 16 : i32}) { + // CHECK-NOT: triton_gpu.convert_layout + // CHECK: [[loaded_val:%.*]] = tt.load {{.*}}, %cst, %cst_0 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<64x64xf32, [[row_layout]]> + // CHECK: [[cvt_val:%.*]] = triton_gpu.convert_layout [[loaded_val]] : (tensor<64x64xf32, [[row_layout]]>) -> tensor<64x64xf32, [[col_layout]]> + // CHECK: tt.store {{.*}}, [[cvt_val]], %cst_1 : tensor<64x64xf32, [[col_layout]]> + // CHECK: return + %cst = arith.constant dense<0.000000e+00> : tensor<64x64xf32, #blocked1> + %cst_0 = arith.constant dense : tensor<64x64xi1, #blocked1> + %00 = tt.make_range {end = 64 : i32, start = 0 : i32} : tensor<64xi32, #slice1dim1> + %01 = tt.make_range {end = 64 : i32, start = 0 : i32} : tensor<64xi32, #slice2dim0> + %1 = tt.expand_dims %00 {axis = 1 : i32} : (tensor<64xi32, #slice1dim1>) -> tensor<64x1xi32, #blocked1> + %2 = tt.splat %arg1 : (i32) -> tensor<64x1xi32, #blocked1> + %3 = arith.muli %1, %2 : tensor<64x1xi32, #blocked1> + %4 = tt.splat %arg0 : (!tt.ptr) -> tensor<64x1x!tt.ptr, #blocked1> + %5 = tt.addptr %4, %3 : tensor<64x1x!tt.ptr, #blocked1>, tensor<64x1xi32, #blocked1> + %6 = tt.expand_dims %01 {axis = 0 : i32} : (tensor<64xi32, #slice2dim0>) -> tensor<1x64xi32, #blocked2> + %7 = tt.broadcast %5 : (tensor<64x1x!tt.ptr, #blocked1>) -> tensor<64x64x!tt.ptr, #blocked1> + %8 = tt.broadcast %6 : (tensor<1x64xi32, #blocked2>) -> tensor<64x64xi32, #blocked2> + %9 = triton_gpu.convert_layout %8 : (tensor<64x64xi32, #blocked2>) -> tensor<64x64xi32, #blocked1> + %10 = tt.addptr %7, %9 : tensor<64x64x!tt.ptr, #blocked1>, tensor<64x64xi32, #blocked1> + %11 = tt.splat %arg2 : (!tt.ptr) -> tensor<64x1x!tt.ptr, #blocked1> + %12 = tt.addptr %11, %1 : tensor<64x1x!tt.ptr, #blocked1>, tensor<64x1xi32, #blocked1> + %13 = tt.splat %arg3 : (i32) -> tensor<1x64xi32, #blocked2> + %14 = arith.muli %6, %13 : tensor<1x64xi32, #blocked2> + %15 = tt.broadcast %12 : (tensor<64x1x!tt.ptr, #blocked1>) -> tensor<64x64x!tt.ptr, #blocked1> + %16 = tt.broadcast %14 : (tensor<1x64xi32, #blocked2>) -> tensor<64x64xi32, #blocked2> + %17 = triton_gpu.convert_layout %16 : (tensor<64x64xi32, #blocked2>) -> tensor<64x64xi32, #blocked1> + %18 = tt.addptr %15, %17 : tensor<64x64x!tt.ptr, #blocked1>, tensor<64x64xi32, #blocked1> + %19 = triton_gpu.convert_layout %10 : (tensor<64x64x!tt.ptr, #blocked1>) -> tensor<64x64x!tt.ptr, #blocked3> + %20 = triton_gpu.convert_layout %cst_0 : (tensor<64x64xi1, #blocked1>) -> tensor<64x64xi1, #blocked3> + %21 = triton_gpu.convert_layout %cst : (tensor<64x64xf32, #blocked1>) -> tensor<64x64xf32, #blocked3> + %22 = tt.load %19, %20, %21 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<64x64xf32, #blocked3> + %23 = triton_gpu.convert_layout %22 : (tensor<64x64xf32, #blocked3>) -> tensor<64x64xf32, #blocked1> + %24 = triton_gpu.convert_layout %18 : (tensor<64x64x!tt.ptr, #blocked1>) -> tensor<64x64x!tt.ptr, #blocked4> + %25 = triton_gpu.convert_layout %23 : (tensor<64x64xf32, #blocked1>) -> tensor<64x64xf32, #blocked4> + %26 = 
triton_gpu.convert_layout %cst_0 : (tensor<64x64xi1, #blocked1>) -> tensor<64x64xi1, #blocked4> + tt.store %24, %25, %26 : tensor<64x64xf32, #blocked4> + return +} + +// CHECK-LABEL: loop +func @loop(%arg0: !tt.ptr, %arg1: i32, %arg2: !tt.ptr, %arg3: i32, %arg4: i32) { + // CHECK-NOT: triton_gpu.convert_layout + // CHECK: [[loop_ret:%.*]]:2 = scf.for {{.*}} -> (tensor<64x64xf32, [[row_layout]]>, tensor<64x64x!tt.ptr, [[row_layout]]>) + // CHECK-NEXT: {{.*}} = tt.load {{.*}} : tensor<64x64xf32, [[row_layout]]> + // CHECK-NEXT: {{.*}} = arith.addf {{.*}} : tensor<64x64xf32, [[row_layout]]> + // CHECK-NEXT: {{.*}} = tt.addptr {{.*}} : tensor<64x64x!tt.ptr, [[row_layout]]>, tensor<64x64xi32, [[row_layout]]> + // CHECK-NEXT: scf.yield {{.*}} : tensor<64x64xf32, [[row_layout]]>, tensor<64x64x!tt.ptr, [[row_layout]]> + // CHECK-NEXT: } + // CHECK-NEXT: {{.*}} = triton_gpu.convert_layout [[loop_ret]]#0 : (tensor<64x64xf32, [[row_layout]]>) -> tensor<64x64xf32, [[col_layout_novec]]> + // CHECK-NOT: triton_gpu.convert_layout + %cst = arith.constant dense : tensor<64x64xi1, #blocked1> + %cst_0 = arith.constant dense<64> : tensor<64x64xi32, #blocked1> + %c1 = arith.constant 1 : index + %c32 = arith.constant 32 : index + %c0 = arith.constant 0 : index + %cst_1 = arith.constant dense<0.000000e+00> : tensor<64x64xf32, #blocked1> + %00 = tt.make_range {end = 64 : i32, start = 0 : i32} : tensor<64xi32, #slice1dim1> + %01 = tt.make_range {end = 64 : i32, start = 0 : i32} : tensor<64xi32, #slice2dim0> + %1 = tt.expand_dims %00 {axis = 1 : i32} : (tensor<64xi32, #slice1dim1>) -> tensor<64x1xi32, #blocked1> + %2 = tt.splat %arg1 : (i32) -> tensor<64x1xi32, #blocked1> + %3 = arith.muli %1, %2 : tensor<64x1xi32, #blocked1> + %4 = tt.splat %arg0 : (!tt.ptr) -> tensor<64x1x!tt.ptr, #blocked1> + %5 = tt.addptr %4, %3 : tensor<64x1x!tt.ptr, #blocked1>, tensor<64x1xi32, #blocked1> + %6 = tt.expand_dims %01 {axis = 0 : i32} : (tensor<64xi32, #slice2dim0>) -> tensor<1x64xi32, #blocked2> + %7 = tt.broadcast %5 : (tensor<64x1x!tt.ptr, #blocked1>) -> tensor<64x64x!tt.ptr, #blocked1> + %8 = tt.broadcast %6 : (tensor<1x64xi32, #blocked2>) -> tensor<64x64xi32, #blocked2> + %9 = triton_gpu.convert_layout %8 : (tensor<64x64xi32, #blocked2>) -> tensor<64x64xi32, #blocked1> + %10 = tt.addptr %7, %9 : tensor<64x64x!tt.ptr, #blocked1>, tensor<64x64xi32, #blocked1> + %11:2 = scf.for %arg5 = %c0 to %c32 step %c1 iter_args(%arg6 = %cst_1, %arg7 = %10) -> (tensor<64x64xf32, #blocked1>, tensor<64x64x!tt.ptr, #blocked1>) { + %23 = triton_gpu.convert_layout %arg7 : (tensor<64x64x!tt.ptr, #blocked1>) -> tensor<64x64x!tt.ptr, #blocked3> + %24 = triton_gpu.convert_layout %cst : (tensor<64x64xi1, #blocked1>) -> tensor<64x64xi1, #blocked3> + %25 = triton_gpu.convert_layout %cst_1 : (tensor<64x64xf32, #blocked1>) -> tensor<64x64xf32, #blocked3> + %26 = tt.load %23, %24, %25 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<64x64xf32, #blocked3> + %27 = triton_gpu.convert_layout %26 : (tensor<64x64xf32, #blocked3>) -> tensor<64x64xf32, #blocked1> + %28 = arith.addf %arg6, %27 : tensor<64x64xf32, #blocked1> + %29 = tt.addptr %arg7, %cst_0 : tensor<64x64x!tt.ptr, #blocked1>, tensor<64x64xi32, #blocked1> + scf.yield %28, %29 : tensor<64x64xf32, #blocked1>, tensor<64x64x!tt.ptr, #blocked1> + } + %12 = tt.splat %arg2 : (!tt.ptr) -> tensor<64x1x!tt.ptr, #blocked1> + %13 = tt.addptr %12, %1 : tensor<64x1x!tt.ptr, #blocked1>, tensor<64x1xi32, #blocked1> + %14 = tt.splat %arg3 : (i32) -> tensor<1x64xi32, #blocked2> + %15 = arith.muli %6, 
%14 : tensor<1x64xi32, #blocked2> + %16 = tt.broadcast %13 : (tensor<64x1x!tt.ptr, #blocked1>) -> tensor<64x64x!tt.ptr, #blocked1> + %17 = tt.broadcast %15 : (tensor<1x64xi32, #blocked2>) -> tensor<64x64xi32, #blocked2> + %18 = triton_gpu.convert_layout %17 : (tensor<64x64xi32, #blocked2>) -> tensor<64x64xi32, #blocked1> + %19 = tt.addptr %16, %18 : tensor<64x64x!tt.ptr, #blocked1>, tensor<64x64xi32, #blocked1> + %20 = triton_gpu.convert_layout %19 : (tensor<64x64x!tt.ptr, #blocked1>) -> tensor<64x64x!tt.ptr, #blocked1> + %21 = triton_gpu.convert_layout %11#0 : (tensor<64x64xf32, #blocked1>) -> tensor<64x64xf32, #blocked1> + %22 = triton_gpu.convert_layout %cst : (tensor<64x64xi1, #blocked1>) -> tensor<64x64xi1, #blocked1> + tt.store %20, %21, %22 : tensor<64x64xf32, #blocked1> + return +} + +// CHECK-LABEL: vecadd +func @vecadd(%arg0: !tt.ptr {tt.divisibility = 16 : i32}, %arg1: !tt.ptr {tt.divisibility = 16 : i32}, %arg2: !tt.ptr {tt.divisibility = 16 : i32}, %arg3: i32) { + // CHECK-NOT: triton_gpu.convert_layout + %c256_i32 = arith.constant 256 : i32 + %0 = tt.get_program_id {axis = 0 : i32} : i32 + %1 = arith.muli %0, %c256_i32 : i32 + %2 = tt.splat %1 : (i32) -> tensor<256xi32, #layout1> + %3 = tt.make_range {end = 256 : i32, start = 0 : i32} : tensor<256xi32, #layout1> + %4 = tt.splat %1 : (i32) -> tensor<256xi32, #layout1> + %5 = tt.make_range {end = 256 : i32, start = 0 : i32} : tensor<256xi32, #layout1> + %6 = tt.splat %1 : (i32) -> tensor<256xi32, #layout1> + %7 = tt.make_range {end = 256 : i32, start = 0 : i32} : tensor<256xi32, #layout1> + %8 = tt.splat %arg0 : (!tt.ptr) -> tensor<256x!tt.ptr, #layout1> + %9 = arith.addi %6, %7 : tensor<256xi32, #layout1> + %10 = tt.splat %arg1 : (!tt.ptr) -> tensor<256x!tt.ptr, #layout1> + %11 = arith.addi %4, %5 : tensor<256xi32, #layout1> + %12 = tt.addptr %8, %9 : tensor<256x!tt.ptr, #layout1>, tensor<256xi32, #layout1> + %13 = tt.load %12 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256xf32, #layout1> + %14 = triton_gpu.convert_layout %13 : (tensor<256xf32, #layout1>) -> tensor<256xf32, #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [2], order = [0]}>> + %15 = tt.addptr %10, %11 : tensor<256x!tt.ptr, #layout1>, tensor<256xi32, #layout1> + %16 = tt.load %15 {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<256xf32, #layout1> + %17 = triton_gpu.convert_layout %16 : (tensor<256xf32, #layout1>) -> tensor<256xf32, #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [2], order = [0]}>> + %18 = arith.addf %14, %17 : tensor<256xf32, #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [2], order = [0]}>> + %19 = tt.splat %arg2 : (!tt.ptr) -> tensor<256x!tt.ptr, #layout1> + %20 = arith.addi %2, %3 : tensor<256xi32, #layout1> + %21 = tt.addptr %19, %20 : tensor<256x!tt.ptr, #layout1>, tensor<256xi32, #layout1> + %22 = triton_gpu.convert_layout %18 : (tensor<256xf32, #triton_gpu.blocked<{sizePerThread = [1], threadsPerWarp = [32], warpsPerCTA = [2], order = [0]}>>) -> tensor<256xf32, #layout1> + tt.store %21, %22 : tensor<256xf32, #layout1> + return +} diff --git a/test/TritonGPU/loop-pipeline.mlir b/test/TritonGPU/loop-pipeline.mlir new file mode 100644 index 000000000000..d8b850615e82 --- /dev/null +++ b/test/TritonGPU/loop-pipeline.mlir @@ -0,0 +1,183 @@ +// RUN: triton-opt %s -split-input-file -tritongpu-pipeline=num-stages=3 -canonicalize | FileCheck %s + +// 4 warps +// matmul: 128x32 @ 32x128 -> 128x128 +#AL = 
#triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [4, 8], warpsPerCTA = [4, 1], order = [1, 0]}> +#BL = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [1, 32], warpsPerCTA = [4, 1], order = [1, 0]}> +#C = #triton_gpu.mma<{versionMajor = 2, warpsPerCTA = [4, 1]}> +#A = #triton_gpu.dot_op<{opIdx = 0, parent = #C}> +#B = #triton_gpu.dot_op<{opIdx = 1, parent = #C}> + +// CHECK: func @matmul_loop +// CHECK-DAG: %[[CONSTANT_0:.*]] = arith.constant 0 : i32 +// CHECK-DAG: %[[CONSTANT_1:.*]] = arith.constant 1 : i32 +// CHECK-DAG: %[[CONSTANT_2:.*]] = arith.constant 2 : i32 +// CHECK-DAG: %[[CONSTANT_3:.*]] = arith.constant 3 : i32 +// CHECK-DAG: %[[LOOP_COND_0:.*]] = arith.cmpi slt, %[[LB:.*]], %[[UB:.*]] +// CHECK: %[[ABUFFER:.*]] = triton_gpu.alloc_tensor +// CHECK-DAG: %[[LOOP_COND_0_SPLAT_A:.*]] = tt.splat %[[LOOP_COND_0]] +// CHECK: %[[A0BUFFER:.*]] = triton_gpu.insert_slice_async {{.*}}, {{.*}}, %[[CONSTANT_0]], %[[LOOP_COND_0_SPLAT_A]] +// CHECK: %[[BBUFFER:.*]] = triton_gpu.alloc_tensor +// CHECK-DAG: %[[LOOP_COND_0_SPLAT_B:.*]] = tt.splat %[[LOOP_COND_0]] +// CHECK: %[[B0BUFFER:.*]] = triton_gpu.insert_slice_async {{.*}}, {{.*}}, %[[CONSTANT_0]], %[[LOOP_COND_0_SPLAT_B]] +// CHECK-DAG: %[[IV_1:.*]] = arith.addi %[[LB]], %[[STEP:.*]] +// CHECK-DAG: %[[LOOP_COND_1:.*]] = arith.cmpi slt, %[[IV_1]], %[[UB]] +// CHECK-DAG: %[[LOOP_COND_1_SPLAT_A:.*]] = tt.splat %[[LOOP_COND_1]] +// CHECK: %[[A1BUFFER:.*]] = triton_gpu.insert_slice_async {{.*}}, {{.*}}, %[[CONSTANT_1]], %[[LOOP_COND_1_SPLAT_A]] +// CHECK-DAG: %[[LOOP_COND_1_SPLAT_B:.*]] = tt.splat %[[LOOP_COND_1]] +// CHECK: %[[B1BUFFER:.*]] = triton_gpu.insert_slice_async {{.*}}, {{.*}}, %[[CONSTANT_1]], %[[LOOP_COND_1_SPLAT_B]] +// CHECK: triton_gpu.async_wait {num = 2 : i32} +// CHECK: %[[A0:.*]] = tensor.extract_slice %[[A1BUFFER]][0, 0, 0] +// CHECK: %[[B0:.*]] = tensor.extract_slice %[[B1BUFFER]][0, 0, 0] +// CHECK: scf.for {{.*}} iter_args({{.*}}, {{.*}}, {{.*}}, {{.*}}, {{.*}}, %[[arg_a0:.*]] = %[[A0]], %[[arg_b0:.*]] = %[[B0]], {{.*}}, {{.*}}, {{.*}}, %[[PIPELINE_IDX:.*]] = %[[CONSTANT_2]], %[[LOOP_IDX:.*]] = %[[CONSTANT_1]] +// CHECK: %[[arg_a0_dot_op:.*]] = triton_gpu.convert_layout %[[arg_a0]] +// CHECK: %[[arg_b0_dot_op:.*]] = triton_gpu.convert_layout %[[arg_b0]] +// CHECK: tt.dot %[[arg_a0_dot_op]], %[[arg_b0_dot_op]], {{.*}} +// CHECK-DAG: %[[INSERT_IDX:.*]] = arith.remsi %[[PIPELINE_IDX]], %[[CONSTANT_3]] +// CHECK-DAG: %[[EXTRACT_INT:.*]] = arith.remsi %[[LOOP_IDX]], %[[CONSTANT_3]] +// CHECK-DAG: %[[EXTRACT_IDX:.*]] = arith.index_cast %[[EXTRACT_INT]] : i32 to index +// CHECK: %[[NEXT_A_BUFFER:.*]] = triton_gpu.insert_slice_async {{.*}}, {{.*}}, %[[INSERT_IDX]] +// CHECK: %[[NEXT_B_BUFFER:.*]] = triton_gpu.insert_slice_async {{.*}}, {{.*}}, %[[INSERT_IDX]] +// CHECK: triton_gpu.async_wait {num = 2 : i32} +// CHECK: %[[NEXT_A:.*]] = tensor.extract_slice %[[NEXT_A_BUFFER]][%[[EXTRACT_IDX]], 0, 0] +// CHECK: %[[NEXT_B:.*]] = tensor.extract_slice %[[NEXT_B_BUFFER]][%[[EXTRACT_IDX]], 0, 0] +// CHECK-DAG: %[[NEXT_PIPELINE_IDX:.*]] = arith.addi %[[PIPELINE_IDX]], %[[CONSTANT_1]] +// CHECK-DAG: %[[NEXT_LOOP_IDX:.*]] = arith.addi %[[LOOP_IDX]], %[[CONSTANT_1]] +// CHECK: scf.yield {{.*}}, {{.*}}, {{.*}}, %[[NEXT_A_BUFFER]], %[[NEXT_B_BUFFER]], %[[NEXT_A]], %[[NEXT_B]], {{.*}}, {{.*}}, {{.*}}, %[[NEXT_PIPELINE_IDX]], %[[NEXT_LOOP_IDX]] +func @matmul_loop(%lb : index, %ub : index, %step : index, %A : !tt.ptr, %B : !tt.ptr) { + %a_ptr_init = tt.broadcast %A : (!tt.ptr) -> tensor<128x32x!tt.ptr, #AL> + 
%b_ptr_init = tt.broadcast %B : (!tt.ptr) -> tensor<32x128x!tt.ptr, #BL> + + %a_mask = arith.constant dense : tensor<128x32xi1, #AL> + %a_other = arith.constant dense<0.00e+00> : tensor<128x32xf16, #AL> + %b_mask = arith.constant dense : tensor<32x128xi1, #BL> + %b_other = arith.constant dense<0.00e+00> : tensor<32x128xf16, #BL> + %c_init = arith.constant dense<0.00e+00> : tensor<128x128xf32, #C> + + %a_off = arith.constant dense<4> : tensor<128x32xi32, #AL> + %b_off = arith.constant dense<4> : tensor<32x128xi32, #BL> + + scf.for %iv = %lb to %ub step %step iter_args(%a_ptr = %a_ptr_init, %b_ptr = %b_ptr_init, %prev_c = %c_init) -> (tensor<128x32x!tt.ptr, #AL>, tensor<32x128x!tt.ptr, #BL>, tensor<128x128xf32, #C>) { + %a_ = tt.load %a_ptr {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x32xf16, #AL> + %a = triton_gpu.convert_layout %a_ : (tensor<128x32xf16, #AL>) -> tensor<128x32xf16, #A> + %b_ = tt.load %b_ptr, %b_mask, %b_other {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<32x128xf16, #BL> + %b = triton_gpu.convert_layout %b_ : (tensor<32x128xf16, #BL>) -> tensor<32x128xf16, #B> + + %c = tt.dot %a, %b, %prev_c {allowTF32 = true, transA = false, transB = false} : tensor<128x32xf16, #A> * tensor<32x128xf16, #B> -> tensor<128x128xf32, #C> + + %next_a_ptr = tt.addptr %a_ptr, %a_off : tensor<128x32x!tt.ptr, #AL>, tensor<128x32xi32, #AL> + %next_b_ptr = tt.addptr %b_ptr, %b_off : tensor<32x128x!tt.ptr, #BL>, tensor<32x128xi32, #BL> + scf.yield %next_a_ptr, %next_b_ptr, %c : tensor<128x32x!tt.ptr, #AL>, tensor<32x128x!tt.ptr, #BL>, tensor<128x128xf32, #C> + } + return +} + + +// CHECK: func @matmul_loop_nested +// CHECK-DAG: %[[CONSTANT_0:.*]] = arith.constant 0 : i32 +// CHECK-DAG: %[[CONSTANT_1:.*]] = arith.constant 1 : i32 +// CHECK-DAG: %[[CONSTANT_2:.*]] = arith.constant 2 : i32 +// CHECK-DAG: %[[CONSTANT_3:.*]] = arith.constant 3 : i32 +// CHECK: scf.for +// CHECK: %[[ABUFFER:.*]] = triton_gpu.alloc_tensor +// CHECK: %[[A0BUFFER:.*]] = triton_gpu.insert_slice_async {{.*}}, {{.*}}, %[[CONSTANT_0]] +// CHECK: %[[BBUFFER:.*]] = triton_gpu.alloc_tensor +// CHECK: %[[B0BUFFER:.*]] = triton_gpu.insert_slice_async {{.*}}, {{.*}}, %[[CONSTANT_0]] +// CHECK: %[[A1BUFFER:.*]] = triton_gpu.insert_slice_async {{.*}}, {{.*}}, %[[CONSTANT_1]] +// CHECK: %[[B1BUFFER:.*]] = triton_gpu.insert_slice_async {{.*}}, {{.*}}, %[[CONSTANT_1]] +// CHECK: triton_gpu.async_wait {num = 2 : i32} +// CHECK: %[[A0:.*]] = tensor.extract_slice %[[A1BUFFER]][0, 0, 0] +// CHECK: %[[B0:.*]] = tensor.extract_slice %[[B1BUFFER]][0, 0, 0] +// CHECK: scf.for {{.*}} iter_args({{.*}}, {{.*}}, {{.*}}, {{.*}}, {{.*}}, %[[arg_a0:.*]] = %[[A0]], %[[arg_b0:.*]] = %[[B0]], {{.*}}, {{.*}}, {{.*}}, %[[PIPELINE_IDX:.*]] = %[[CONSTANT_2]], %[[LOOP_IDX:.*]] = %[[CONSTANT_1]] +// CHECK: %[[arg_a0_dot_op:.*]] = triton_gpu.convert_layout %[[arg_a0]] +// CHECK: %[[arg_b0_dot_op:.*]] = triton_gpu.convert_layout %[[arg_b0]] +// CHECK: tt.dot %[[arg_a0_dot_op]], %[[arg_b0_dot_op]], {{.*}} +// CHECK-DAG: %[[INSERT_IDX:.*]] = arith.remsi %[[PIPELINE_IDX]], %[[CONSTANT_3]] +// CHECK-DAG: %[[EXTRACT_INT:.*]] = arith.remsi %[[LOOP_IDX]], %[[CONSTANT_3]] +// CHECK-DAG: %[[EXTRACT_IDX:.*]] = arith.index_cast %[[EXTRACT_INT]] : i32 to index +// CHECK: %[[NEXT_A_BUFFER:.*]] = triton_gpu.insert_slice_async {{.*}}, {{.*}}, %[[INSERT_IDX]] +// CHECK: %[[NEXT_B_BUFFER:.*]] = triton_gpu.insert_slice_async {{.*}}, {{.*}}, %[[INSERT_IDX]] +// CHECK: triton_gpu.async_wait {num = 2 : i32} +// CHECK: %[[NEXT_A:.*]] = 
tensor.extract_slice %[[NEXT_A_BUFFER]][%[[EXTRACT_IDX]], 0, 0] +// CHECK: %[[NEXT_B:.*]] = tensor.extract_slice %[[NEXT_B_BUFFER]][%[[EXTRACT_IDX]], 0, 0] +// CHECK-DAG: %[[NEXT_PIPELINE_IDX:.*]] = arith.addi %[[PIPELINE_IDX]], %[[CONSTANT_1]] +// CHECK-DAG: %[[NEXT_LOOP_IDX:.*]] = arith.addi %[[LOOP_IDX]], %[[CONSTANT_1]] +// CHECK: scf.yield {{.*}}, {{.*}}, {{.*}}, %[[NEXT_A_BUFFER]], %[[NEXT_B_BUFFER]], %[[NEXT_A]], %[[NEXT_B]], {{.*}}, {{.*}}, {{.*}}, %[[NEXT_PIPELINE_IDX]], %[[NEXT_LOOP_IDX]] +func @matmul_loop_nested(%lb : index, %ub : index, %step : index, %A : !tt.ptr, %B : !tt.ptr) { + scf.for %iv0 = %lb to %ub step %step { + %a_ptr_init = tt.broadcast %A : (!tt.ptr) -> tensor<128x32x!tt.ptr, #AL> + %b_ptr_init = tt.broadcast %B : (!tt.ptr) -> tensor<32x128x!tt.ptr, #BL> + + %a_mask = arith.constant dense : tensor<128x32xi1, #AL> + %a_other = arith.constant dense<0.00e+00> : tensor<128x32xf16, #AL> + %b_mask = arith.constant dense : tensor<32x128xi1, #BL> + %b_other = arith.constant dense<0.00e+00> : tensor<32x128xf16, #BL> + %c_init = arith.constant dense<0.00e+00> : tensor<128x128xf32, #C> + + %a_off = arith.constant dense<4> : tensor<128x32xi32, #AL> + %b_off = arith.constant dense<4> : tensor<32x128xi32, #BL> + + scf.for %iv = %lb to %ub step %step iter_args(%a_ptr = %a_ptr_init, %b_ptr = %b_ptr_init, %prev_c = %c_init) -> (tensor<128x32x!tt.ptr, #AL>, tensor<32x128x!tt.ptr, #BL>, tensor<128x128xf32, #C>) { + %a_ = tt.load %a_ptr, %a_mask, %a_other {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x32xf16, #AL> + %a = triton_gpu.convert_layout %a_ : (tensor<128x32xf16, #AL>) -> tensor<128x32xf16, #A> + %b_ = tt.load %b_ptr, %b_mask, %b_other {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<32x128xf16, #BL> + %b = triton_gpu.convert_layout %b_ : (tensor<32x128xf16, #BL>) -> tensor<32x128xf16, #B> + + %c = tt.dot %a, %b, %prev_c {allowTF32 = true, transA = false, transB = false} : tensor<128x32xf16, #A> * tensor<32x128xf16, #B> -> tensor<128x128xf32, #C> + + %next_a_ptr = tt.addptr %a_ptr, %a_off : tensor<128x32x!tt.ptr, #AL>, tensor<128x32xi32, #AL> + %next_b_ptr = tt.addptr %b_ptr, %b_off : tensor<32x128x!tt.ptr, #BL>, tensor<32x128xi32, #BL> + scf.yield %next_a_ptr, %next_b_ptr, %c : tensor<128x32x!tt.ptr, #AL>, tensor<32x128x!tt.ptr, #BL>, tensor<128x128xf32, #C> + } + } + return +} + + +// CHECK: func @matmul_loop_single_pipeline +// CHECK-DAG: %[[CONSTANT_0:.*]] = arith.constant 0 : i32 +// CHECK-DAG: %[[CONSTANT_1:.*]] = arith.constant 1 : i32 +// CHECK-DAG: %[[CONSTANT_2:.*]] = arith.constant 2 : i32 +// CHECK-DAG: %[[CONSTANT_3:.*]] = arith.constant 3 : i32 +// CHECK: %[[BBUFFER:.*]] = triton_gpu.alloc_tensor +// CHECK: %[[B0BUFFER:.*]] = triton_gpu.insert_slice_async {{.*}}, {{.*}}, %[[CONSTANT_0]] +// CHECK: %[[B1BUFFER:.*]] = triton_gpu.insert_slice_async {{.*}}, {{.*}}, %[[CONSTANT_1]] +// CHECK: triton_gpu.async_wait {num = 1 : i32} +// CHECK: %[[B0:.*]] = tensor.extract_slice %[[B1BUFFER]][0, 0, 0] +// CHECK: scf.for {{.*}} iter_args({{.*}}, {{.*}}, {{.*}}, %[[arg_b0:.*]] = %[[B0]], {{.*}}, {{.*}}, %[[PIPELINE_IDX:.*]] = %[[CONSTANT_2]], %[[LOOP_IDX:.*]] = %[[CONSTANT_1]] +// CHECK: %[[arg_b0_dot_op:.*]] = triton_gpu.convert_layout %[[arg_b0]] +// CHECK: tt.dot {{.*}}, %[[arg_b0_dot_op]], {{.*}} +// CHECK-DAG: %[[INSERT_IDX:.*]] = arith.remsi %[[PIPELINE_IDX]], %[[CONSTANT_3]] +// CHECK-DAG: %[[EXTRACT_INT:.*]] = arith.remsi %[[LOOP_IDX]], %[[CONSTANT_3]] +// CHECK-DAG: %[[EXTRACT_IDX:.*]] = arith.index_cast %[[EXTRACT_INT]] : i32 
to index +// CHECK: %[[NEXT_B_BUFFER:.*]] = triton_gpu.insert_slice_async {{.*}}, {{.*}}, %[[INSERT_IDX]] +// CHECK: triton_gpu.async_wait {num = 1 : i32} +// CHECK: %[[NEXT_B:.*]] = tensor.extract_slice %[[NEXT_B_BUFFER]][%[[EXTRACT_IDX]], 0, 0] +// CHECK-DAG: %[[NEXT_PIPELINE_IDX:.*]] = arith.addi %[[PIPELINE_IDX]], %[[CONSTANT_1]] +// CHECK-DAG: %[[NEXT_LOOP_IDX:.*]] = arith.addi %[[LOOP_IDX]], %[[CONSTANT_1]] +// CHECK: scf.yield {{.*}}, {{.*}}, %[[NEXT_B_BUFFER]], %[[NEXT_B]], {{.*}}, {{.*}}, %[[NEXT_PIPELINE_IDX]], %[[NEXT_LOOP_IDX]] +func @matmul_loop_single_pipeline(%lb : index, %ub : index, %step : index, %A : !tt.ptr, %B : !tt.ptr) { + %a_ptr_init = tt.broadcast %A : (!tt.ptr) -> tensor<128x32x!tt.ptr, #AL> + %b_ptr_init = tt.broadcast %B : (!tt.ptr) -> tensor<32x128x!tt.ptr, #BL> + + %a_mask = arith.constant dense : tensor<128x32xi1, #AL> + %a_other = arith.constant dense<0.00e+00> : tensor<128x32xf16, #AL> + + %a_ = tt.load %a_ptr_init, %a_mask, %a_other {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x32xf16, #AL> + %a = triton_gpu.convert_layout %a_ : (tensor<128x32xf16, #AL>) -> tensor<128x32xf16, #A> + + %b_mask = arith.constant dense : tensor<32x128xi1, #BL> + %b_other = arith.constant dense<0.00e+00> : tensor<32x128xf16, #BL> + %c_init = arith.constant dense<0.00e+00> : tensor<128x128xf32, #C> + + %b_off = arith.constant dense<4> : tensor<32x128xi32, #BL> + + scf.for %iv = %lb to %ub step %step iter_args(%b_ptr = %b_ptr_init, %prev_c = %c_init) -> (tensor<32x128x!tt.ptr, #BL>, tensor<128x128xf32, #C>) { + %b_ = tt.load %b_ptr, %b_mask, %b_other {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<32x128xf16, #BL> + %b = triton_gpu.convert_layout %b_ : (tensor<32x128xf16, #BL>) -> tensor<32x128xf16, #B> + %c = tt.dot %a, %b, %prev_c {allowTF32 = true, transA = false, transB = false} : tensor<128x32xf16, #A> * tensor<32x128xf16, #B> -> tensor<128x128xf32, #C> + %next_b_ptr = tt.addptr %b_ptr, %b_off : tensor<32x128x!tt.ptr, #BL>, tensor<32x128xi32, #BL> + scf.yield %next_b_ptr, %c : tensor<32x128x!tt.ptr, #BL>, tensor<128x128xf32, #C> + } + return +} diff --git a/test/TritonGPU/matmul.mlir b/test/TritonGPU/matmul.mlir new file mode 100644 index 000000000000..9bd5318e1e88 --- /dev/null +++ b/test/TritonGPU/matmul.mlir @@ -0,0 +1,106 @@ +// RUN: triton-opt %s -split-input-file -convert-triton-to-tritongpu -tritongpu-combine -tritongpu-pipeline=num-stages=3 -tritongpu-combine -test-print-allocation 2>&1 | FileCheck %s + +// CHECK: offset = 0, size = 49152 +// CHECK: offset = 49152, size = 49152 +// CHECK: size = 98304 +module { +func @matmul_kernel__Pfp32_Pfp32_Pfp32_i32_i32_i32_i32_i32_i32_i32_i32_i32__12c64_13c64_14c64_15c8(%arg0: !tt.ptr {tt.divisibility = 16 : i32}, %arg1: !tt.ptr {tt.divisibility = 16 : i32}, %arg2: !tt.ptr {tt.divisibility = 16 : i32}, %arg3: i32, %arg4: i32, %arg5: i32, %arg6: i32 {tt.divisibility = 16 : i32}, %arg7: i32, %arg8: i32 {tt.divisibility = 16 : i32}, %arg9: i32, %arg10: i32 {tt.divisibility = 16 : i32}, %arg11: i32) { + %cst = arith.constant dense : tensor<64x64xi1> + %c64 = arith.constant 64 : index + %c0 = arith.constant 0 : index + %cst_0 = arith.constant dense<0.000000e+00> : tensor<64x64xf32> + %c64_i32 = arith.constant 64 : i32 + %c63_i32 = arith.constant 63 : i32 + %c8_i32 = arith.constant 8 : i32 + %0 = tt.get_program_id {axis = 0 : i32} : i32 + %1 = arith.addi %arg3, %c63_i32 : i32 + %2 = arith.divsi %1, %c64_i32 : i32 + %3 = arith.addi %arg4, %c63_i32 : i32 + %4 = arith.divsi %3, %c64_i32 : i32 + %5 = 
arith.muli %4, %c8_i32 : i32 + %6 = arith.divsi %0, %5 : i32 + %7 = arith.muli %6, %c8_i32 : i32 + %8 = arith.subi %2, %7 : i32 + %9 = arith.cmpi slt, %8, %c8_i32 : i32 + %10 = select %9, %8, %c8_i32 : i32 + %11 = arith.remsi %0, %10 : i32 + %12 = arith.addi %7, %11 : i32 + %13 = arith.remsi %0, %5 : i32 + %14 = arith.divsi %13, %10 : i32 + %15 = arith.muli %12, %c64_i32 : i32 + %16 = tt.make_range {end = 64 : i32, start = 0 : i32} : tensor<64xi32> + %17 = tt.splat %15 : (i32) -> tensor<64xi32> + %18 = arith.addi %17, %16 : tensor<64xi32> + %19 = arith.muli %14, %c64_i32 : i32 + %20 = tt.make_range {end = 64 : i32, start = 0 : i32} : tensor<64xi32> + %21 = tt.splat %19 : (i32) -> tensor<64xi32> + %22 = arith.addi %21, %20 : tensor<64xi32> + %23 = tt.make_range {end = 64 : i32, start = 0 : i32} : tensor<64xi32> + %24 = tt.expand_dims %18 {axis = 1 : i32} : (tensor<64xi32>) -> tensor<64x1xi32> + %25 = tt.splat %arg6 : (i32) -> tensor<64x1xi32> + %26 = arith.muli %24, %25 : tensor<64x1xi32> + %27 = tt.expand_dims %23 {axis = 0 : i32} : (tensor<64xi32>) -> tensor<1x64xi32> + %28 = tt.splat %arg7 : (i32) -> tensor<1x64xi32> + %29 = arith.muli %27, %28 : tensor<1x64xi32> + %30 = tt.broadcast %26 : (tensor<64x1xi32>) -> tensor<64x64xi32> + %31 = tt.broadcast %29 : (tensor<1x64xi32>) -> tensor<64x64xi32> + %32 = arith.addi %30, %31 : tensor<64x64xi32> + %33 = tt.splat %arg0 : (!tt.ptr) -> tensor<64x64x!tt.ptr> + %34 = tt.addptr %33, %32 : tensor<64x64x!tt.ptr>, tensor<64x64xi32> + %35 = tt.expand_dims %23 {axis = 1 : i32} : (tensor<64xi32>) -> tensor<64x1xi32> + %36 = tt.splat %arg8 : (i32) -> tensor<64x1xi32> + %37 = arith.muli %35, %36 : tensor<64x1xi32> + %38 = tt.expand_dims %22 {axis = 0 : i32} : (tensor<64xi32>) -> tensor<1x64xi32> + %39 = tt.splat %arg9 : (i32) -> tensor<1x64xi32> + %40 = arith.muli %38, %39 : tensor<1x64xi32> + %41 = tt.broadcast %37 : (tensor<64x1xi32>) -> tensor<64x64xi32> + %42 = tt.broadcast %40 : (tensor<1x64xi32>) -> tensor<64x64xi32> + %43 = arith.addi %41, %42 : tensor<64x64xi32> + %44 = tt.splat %arg1 : (!tt.ptr) -> tensor<64x64x!tt.ptr> + %45 = tt.addptr %44, %43 : tensor<64x64x!tt.ptr>, tensor<64x64xi32> + %46 = arith.index_cast %arg5 : i32 to index + %47:3 = scf.for %arg12 = %c0 to %46 step %c64 iter_args(%arg13 = %cst_0, %arg14 = %34, %arg15 = %45) -> (tensor<64x64xf32>, tensor<64x64x!tt.ptr>, tensor<64x64x!tt.ptr>) { + %76 = tt.load %arg14, %cst, %cst_0 {cache = 1 : i32, evict = 1 : i32, isVolatile = false, transA=false, transB=false} : tensor<64x64xf32> + %77 = tt.load %arg15, %cst, %cst_0 {cache = 1 : i32, evict = 1 : i32, isVolatile = false, transA=false, transB=false} : tensor<64x64xf32> + %78 = tt.dot %76, %77, %cst_0 {allowTF32 = true, transA = false, transB = false} : tensor<64x64xf32> * tensor<64x64xf32> -> tensor<64x64xf32> + %79 = arith.addf %arg13, %78 : tensor<64x64xf32> + %80 = arith.muli %arg7, %c64_i32 : i32 + %81 = tt.splat %80 : (i32) -> tensor<64x64xi32> + %82 = tt.addptr %arg14, %81 : tensor<64x64x!tt.ptr>, tensor<64x64xi32> + %83 = arith.muli %arg8, %c64_i32 : i32 + %84 = tt.splat %83 : (i32) -> tensor<64x64xi32> + %85 = tt.addptr %arg15, %84 : tensor<64x64x!tt.ptr>, tensor<64x64xi32> + scf.yield %79, %82, %85 : tensor<64x64xf32>, tensor<64x64x!tt.ptr>, tensor<64x64x!tt.ptr> + } + %48 = arith.muli %12, %c64_i32 : i32 + %49 = tt.make_range {end = 64 : i32, start = 0 : i32} : tensor<64xi32> + %50 = tt.splat %48 : (i32) -> tensor<64xi32> + %51 = arith.addi %50, %49 : tensor<64xi32> + %52 = arith.muli %14, %c64_i32 : i32 + %53 = tt.make_range 
{end = 64 : i32, start = 0 : i32} : tensor<64xi32> + %54 = tt.splat %52 : (i32) -> tensor<64xi32> + %55 = arith.addi %54, %53 : tensor<64xi32> + %56 = tt.expand_dims %51 {axis = 1 : i32} : (tensor<64xi32>) -> tensor<64x1xi32> + %57 = tt.splat %arg10 : (i32) -> tensor<64x1xi32> + %58 = arith.muli %57, %56 : tensor<64x1xi32> + %59 = tt.expand_dims %55 {axis = 0 : i32} : (tensor<64xi32>) -> tensor<1x64xi32> + %60 = tt.splat %arg11 : (i32) -> tensor<1x64xi32> + %61 = arith.muli %59, %60 : tensor<1x64xi32> + %62 = tt.broadcast %58 : (tensor<64x1xi32>) -> tensor<64x64xi32> + %63 = tt.broadcast %61 : (tensor<1x64xi32>) -> tensor<64x64xi32> + %64 = arith.addi %62, %63 : tensor<64x64xi32> + %65 = tt.splat %arg2 : (!tt.ptr) -> tensor<64x64x!tt.ptr> + %66 = tt.addptr %65, %64 : tensor<64x64x!tt.ptr>, tensor<64x64xi32> + %67 = tt.expand_dims %51 {axis = 1 : i32} : (tensor<64xi32>) -> tensor<64x1xi32> + %68 = tt.splat %arg3 : (i32) -> tensor<64x1xi32> + %69 = arith.cmpi slt, %67, %68 : tensor<64x1xi32> + %70 = tt.expand_dims %55 {axis = 0 : i32} : (tensor<64xi32>) -> tensor<1x64xi32> + %71 = tt.splat %arg4 : (i32) -> tensor<1x64xi32> + %72 = arith.cmpi slt, %70, %71 : tensor<1x64xi32> + %73 = tt.broadcast %69 : (tensor<64x1xi1>) -> tensor<64x64xi1> + %74 = tt.broadcast %72 : (tensor<1x64xi1>) -> tensor<64x64xi1> + %75 = arith.andi %73, %74 : tensor<64x64xi1> + tt.store %66, %47#0, %75 : tensor<64x64xf32> + return + } +} diff --git a/test/TritonGPU/prefetch.mlir b/test/TritonGPU/prefetch.mlir new file mode 100644 index 000000000000..5a8cd860beac --- /dev/null +++ b/test/TritonGPU/prefetch.mlir @@ -0,0 +1,65 @@ +// RUN: triton-opt %s -split-input-file -tritongpu-prefetch | FileCheck %s + +// 4 warps +// matmul: 128x32 @ 32x128 -> 128x128 +#AL = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [4, 8], warpsPerCTA = [4, 1], order = [1, 0]}> +#BL = #triton_gpu.blocked<{sizePerThread = [1, 4], threadsPerWarp = [1, 32], warpsPerCTA = [4, 1], order = [1, 0]}> +#A = #triton_gpu.shared<{vec = 2, perPhase = 2, maxPhase = 4, order = [1, 0]}> +#B = #triton_gpu.shared<{vec = 2, perPhase = 2, maxPhase = 4, order = [1, 0]}> +#C = #triton_gpu.mma<{version = 2, warpsPerCTA = [4, 1]}> +#A_OP = #triton_gpu.dot_op<{opIdx = 0, parent = #C}> +#B_OP = #triton_gpu.dot_op<{opIdx = 1, parent = #C}> + + +// CHECK: func @matmul_loop +// CHECK-DAG: %[[A0_PREFETCH_SMEM:.*]] = tensor.extract_slice %[[A0:.*]][0, 0] [128, 16] +// CHECK-DAG: %[[A0_PREFETCH:.*]] = triton_gpu.convert_layout %[[A0_PREFETCH_SMEM]] +// CHECK-DAG: %[[B0_PREFETCH_SMEM:.*]] = tensor.extract_slice %[[B0:.*]][0, 0] [16, 128] +// CHECK-DAG: %[[B0_PREFETCH:.*]] = triton_gpu.convert_layout %[[B0_PREFETCH_SMEM]] +// CHECK: scf.for {{.*}} iter_args({{.*}}, {{.*}}, %[[arg_a0:.*]] = %[[A0]], %[[arg_b0:.*]] = %[[B0]], {{.*}}, %[[a0_prefetch:.*]] = %[[A0_PREFETCH]], %[[b0_prefetch:.*]] = %[[B0_PREFETCH]] +// CHECK: %[[D_FIRST:.*]] = tt.dot %[[a0_prefetch]], %[[b0_prefetch:.*]], {{.*}} +// CHECK-DAG: %[[A_REM_SMEM:.*]] = tensor.extract_slice %[[arg_a0]][0, 16] [128, 16] +// CHECK-DAG: %[[A_REM:.*]] = triton_gpu.convert_layout %[[A_REM_SMEM]] +// CHECK-DAG: %[[B_REM_SMEM:.*]] = tensor.extract_slice %[[arg_b0]][16, 0] [16, 128] +// CHECK-DAG: %[[B_REM:.*]] = triton_gpu.convert_layout %[[B_REM_SMEM]] +// CHECK: tt.dot %[[A_REM]], %[[B_REM]], %[[D_FIRST:.*]] +// CHECK-DAG: %[[NEXT_A_PREFETCH_SMEM:.*]] = tensor.extract_slice {{.*}}[0, 0] [128, 16] +// CHECK-DAG: %[[NEXT_A_PREFETCH:.*]] = triton_gpu.convert_layout %[[NEXT_A_PREFETCH_SMEM]] +// CHECK-DAG: 
%[[NEXT_B_PREFETCH_SMEM:.*]] = tensor.extract_slice {{.*}}[0, 0] [16, 128] +// CHECK-DAG: %[[NEXT_B_PREFETCH:.*]] = triton_gpu.convert_layout %[[NEXT_B_PREFETCH_SMEM]] +// CHECK: scf.yield {{.*}}, {{.*}}, {{.*}}, {{.*}}, {{.*}}, %[[NEXT_A_PREFETCH]], %[[NEXT_B_PREFETCH]] +func @matmul_loop(%lb : index, %ub : index, %step : index, %A : !tt.ptr, %B : !tt.ptr) { + %a_ptr_init = tt.broadcast %A : (!tt.ptr) -> tensor<128x32x!tt.ptr, #AL> + %b_ptr_init = tt.broadcast %B : (!tt.ptr) -> tensor<32x128x!tt.ptr, #BL> + + %a_mask = arith.constant dense : tensor<128x32xi1, #AL> + %a_other = arith.constant dense<0.00e+00> : tensor<128x32xf16, #AL> + %b_mask = arith.constant dense : tensor<32x128xi1, #BL> + %b_other = arith.constant dense<0.00e+00> : tensor<32x128xf16, #BL> + %c_init = arith.constant dense<0.00e+00> : tensor<128x128xf32, #C> + + %a_off = arith.constant dense<4> : tensor<128x32xi32, #AL> + %b_off = arith.constant dense<4> : tensor<32x128xi32, #BL> + + %a_ = tt.load %a_ptr_init, %a_mask, %a_other {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x32xf16, #AL> + %a_init = triton_gpu.convert_layout %a_ : (tensor<128x32xf16, #AL>) -> tensor<128x32xf16, #A> + %b_ = tt.load %b_ptr_init, %b_mask, %b_other {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<32x128xf16, #BL> + %b_init = triton_gpu.convert_layout %b_ : (tensor<32x128xf16, #BL>) -> tensor<32x128xf16, #B> + + scf.for %iv = %lb to %ub step %step iter_args(%a_ptr = %a_ptr_init, %b_ptr = %b_ptr_init, %a = %a_init, %b = %b_init, %prev_c = %c_init) -> (tensor<128x32x!tt.ptr, #AL>, tensor<32x128x!tt.ptr, #BL>, tensor<128x32xf16, #A>, tensor<32x128xf16, #B>, tensor<128x128xf32, #C>) { + %a_op = triton_gpu.convert_layout %a : (tensor<128x32xf16, #A>) -> tensor<128x32xf16, #A_OP> + %b_op = triton_gpu.convert_layout %b : (tensor<32x128xf16, #B>) -> tensor<32x128xf16, #B_OP> + %c = tt.dot %a_op, %b_op, %prev_c {allowTF32 = true, transA = false, transB = false} : tensor<128x32xf16, #A_OP> * tensor<32x128xf16, #B_OP> -> tensor<128x128xf32, #C> + + %next_a_ptr = tt.addptr %a_ptr, %a_off : tensor<128x32x!tt.ptr, #AL>, tensor<128x32xi32, #AL> + %next_b_ptr = tt.addptr %b_ptr, %b_off : tensor<32x128x!tt.ptr, #BL>, tensor<32x128xi32, #BL> + %next_a_ = tt.load %next_a_ptr, %a_mask, %a_other {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<128x32xf16, #AL> + %next_a = triton_gpu.convert_layout %next_a_ : (tensor<128x32xf16, #AL>) -> tensor<128x32xf16, #A> + %next_b_ = tt.load %next_b_ptr, %b_mask, %b_other {cache = 1 : i32, evict = 1 : i32, isVolatile = false} : tensor<32x128xf16, #BL> + %next_b = triton_gpu.convert_layout %b_ : (tensor<32x128xf16, #BL>) -> tensor<32x128xf16, #B> + + scf.yield %next_a_ptr, %next_b_ptr, %next_a, %next_b, %c : tensor<128x32x!tt.ptr, #AL>, tensor<32x128x!tt.ptr, #BL>, tensor<128x32xf16, #A>, tensor<32x128xf16, #B>, tensor<128x128xf32, #C> + } + return +} + diff --git a/test/lib/Analysis/CMakeLists.txt b/test/lib/Analysis/CMakeLists.txt new file mode 100644 index 000000000000..3b21835b7565 --- /dev/null +++ b/test/lib/Analysis/CMakeLists.txt @@ -0,0 +1,9 @@ +add_mlir_library(TritonTestAnalysis + TestAlias.cpp + TestAxisInfo.cpp + TestAllocation.cpp + TestMembar.cpp + + LINK_LIBS PUBLIC + TritonAnalysis +) \ No newline at end of file diff --git a/test/lib/Analysis/TestAlias.cpp b/test/lib/Analysis/TestAlias.cpp new file mode 100644 index 000000000000..b8fef4e938fb --- /dev/null +++ b/test/lib/Analysis/TestAlias.cpp @@ -0,0 +1,92 @@ +#include "mlir/IR/AsmState.h" +#include 
"mlir/Pass/Pass.h" +#include "triton/Analysis/Alias.h" +#include "triton/Analysis/Utility.h" +#include "triton/Dialect/TritonGPU/IR/Dialect.h" + +using namespace mlir; + +namespace { + +struct TestAliasPass + : public PassWrapper> { + + // LLVM15+ + // MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestAliasPass); + static void print(StringRef name, SmallVector &vals, + raw_ostream &os) { + if (vals.empty()) + return; + os << name << " -> "; + size_t i = 0; + for (auto val : vals) { + if (i != 0) + os << ","; + os << val; + ++i; + } + os << "\n"; + } + + StringRef getArgument() const final { return "test-print-alias"; } + StringRef getDescription() const final { + return "print the result of the alias analysis pass"; + } + + void runOnOperation() override { + Operation *operation = getOperation(); + auto &os = llvm::errs(); + auto op_name = SymbolTable::getSymbolName(operation).getValue().str(); + os << op_name << "\n"; + + SharedMemoryAliasAnalysis analysis(&getContext()); + analysis.run(operation); + + AsmState state(operation->getParentOfType()); + // Get operation ids of value's aliases + auto getAllocOpNames = [&](Value value) { + LatticeElement *latticeElement = + analysis.lookupLatticeElement(value); + SmallVector opNames; + if (latticeElement) { + auto &info = latticeElement->getValue(); + if (!info.getAllocs().empty()) { + for (auto &alias : info.getAllocs()) { + auto opName = + getValueOperandName(alias.getDefiningOp()->getResult(0), state); + opNames.push_back(std::move(opName)); + } + } + } + // Ensure deterministic output + std::sort(opNames.begin(), opNames.end()); + return opNames; + }; + + operation->walk([&](Operation *op) { + if (op->getNumResults() < 1) + return; + if (auto forOp = dyn_cast(op)) { + for (auto arg : llvm::enumerate(forOp.getRegionIterArgs())) { + auto operand = forOp.getOpOperandForRegionIterArg(arg.value()).get(); + auto opNames = getAllocOpNames(operand); + auto argName = getValueOperandName(arg.value(), state); + print(argName, opNames, os); + } + } + for (auto result : llvm::enumerate(op->getResults())) { + auto opNames = getAllocOpNames(result.value()); + auto resultName = getValueOperandName(result.value(), state); + print(resultName, opNames, os); + } + }); + } +}; + +} // namespace + +namespace mlir { +namespace test { +void registerTestAliasPass() { PassRegistration(); } +} // namespace test +} // namespace mlir diff --git a/test/lib/Analysis/TestAllocation.cpp b/test/lib/Analysis/TestAllocation.cpp new file mode 100644 index 000000000000..a29465630a18 --- /dev/null +++ b/test/lib/Analysis/TestAllocation.cpp @@ -0,0 +1,54 @@ +#include "mlir/Pass/Pass.h" +#include "triton/Analysis/Allocation.h" + +using namespace mlir; + +namespace { + +struct TestAllocationPass + : public PassWrapper> { + + // LLVM15+ + // MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestAllocationPass); + + StringRef getArgument() const final { return "test-print-allocation"; } + StringRef getDescription() const final { + return "print the result of the allocation pass"; + } + + void runOnOperation() override { + Operation *operation = getOperation(); + auto &os = llvm::errs(); + // Convert to std::string can remove quotes from op_name + auto op_name = SymbolTable::getSymbolName(operation).getValue().str(); + os << op_name << "\n"; + Allocation allocation(operation); + operation->walk([&](Operation *op) { + auto scratchBufferId = allocation.getBufferId(op); + if (scratchBufferId != Allocation::InvalidBufferId) { + size_t offset = allocation.getOffset(scratchBufferId); + size_t 
size = allocation.getAllocatedSize(scratchBufferId); + os << "scratch offset = " << offset << ", size = " << size << "\n"; + } + if (op->getNumResults() < 1) + return; + for (Value result : op->getResults()) { + auto bufferId = allocation.getBufferId(result); + if (bufferId != Allocation::InvalidBufferId) { + size_t offset = allocation.getOffset(bufferId); + size_t size = allocation.getAllocatedSize(bufferId); + os << "offset = " << offset << ", size = " << size << "\n"; + } + } + }); + os << "size = " << allocation.getSharedMemorySize() << "\n"; + } +}; + +} // namespace + +namespace mlir { +namespace test { +void registerTestAllocationPass() { PassRegistration(); } +} // namespace test +} // namespace mlir diff --git a/test/lib/Analysis/TestAxisInfo.cpp b/test/lib/Analysis/TestAxisInfo.cpp new file mode 100644 index 000000000000..94ff92f4d784 --- /dev/null +++ b/test/lib/Analysis/TestAxisInfo.cpp @@ -0,0 +1,69 @@ +#include "mlir/Pass/Pass.h" +#include "triton/Analysis/AxisInfo.h" + +using namespace mlir; + +namespace { + +struct TestAxisInfoPass + : public PassWrapper> { + + // LLVM15+ + // MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestAlignmentPass); + + void print(const std::string &name, raw_ostream &os, ArrayRef vals) { + os << name << ": ["; + for (size_t d = 0; d < vals.size(); d++) { + if (d != 0) + os << ", "; + os << vals[d]; + } + os << "]"; + } + + StringRef getArgument() const final { return "test-print-alignment"; } + StringRef getDescription() const final { + return "print the result of the alignment analysis pass"; + } + + void runOnOperation() override { + Operation *operation = getOperation(); + auto &os = llvm::errs(); + os << "Testing: " << operation->getName() << "\n"; + AxisInfoAnalysis analysis(&getContext()); + analysis.run(operation); + operation->walk([&](Operation *op) { + if (op->getNumResults() < 1) + return; + for (Value result : op->getResults()) { + // std::ostringstream oss; + // result.print(oss); + // os << " => "; + LatticeElement *latticeElement = + analysis.lookupLatticeElement(result); + if (!latticeElement) { + os << "None\n"; + return; + } + AxisInfo &info = latticeElement->getValue(); + print("Contiguity", os, info.getContiguity()); + os << " ; "; + print("Divisibility", os, info.getDivisibility()); + os << " ; "; + print("Constancy", os, info.getConstancy()); + os << " ( "; + result.print(os); + os << " ) "; + os << "\n"; + } + }); + } +}; + +} // namespace + +namespace mlir { +namespace test { +void registerTestAlignmentPass() { PassRegistration(); } +} // namespace test +} // namespace mlir diff --git a/test/lib/Analysis/TestMembar.cpp b/test/lib/Analysis/TestMembar.cpp new file mode 100644 index 000000000000..03a56cc10f4a --- /dev/null +++ b/test/lib/Analysis/TestMembar.cpp @@ -0,0 +1,52 @@ +#include "mlir/Dialect/GPU/GPUDialect.h" +#include "mlir/IR/Dialect.h" +#include "mlir/Pass/Pass.h" +#include "triton/Analysis/Allocation.h" +#include "triton/Analysis/Membar.h" + +using namespace mlir; + +namespace { + +struct TestMembarPass + : public PassWrapper> { + + // LLVM15+ + // MLIR_DEFINE_EXPLICIT_INTERNAL_INLINE_TYPE_ID(TestMembarPass); + + StringRef getArgument() const final { return "test-print-membar"; } + StringRef getDescription() const final { + return "print the result of the allocation pass"; + } + + void runOnOperation() override { + Operation *operation = getOperation(); + auto &os = llvm::errs(); + // Convert to std::string can remove quotes from op_name + auto op_name = SymbolTable::getSymbolName(operation).getValue().str(); + os 
<< op_name << "\n"; + Allocation allocation(operation); + MembarAnalysis membarPass(&allocation); + membarPass.run(); + + size_t operationId = 0; + operation->walk([&](Operation *op) { + if (isa(op)) { + os << "Membar " << operationId << "\n"; + } + if (op->getNumRegions() == 0) { + // Don't count parent Operation to simplify the test. + operationId++; + } + return; + }); + } +}; + +} // namespace + +namespace mlir { +namespace test { +void registerTestMembarPass() { PassRegistration(); } +} // namespace test +} // namespace mlir diff --git a/test/lib/CMakeLists.txt b/test/lib/CMakeLists.txt new file mode 100644 index 000000000000..5c6d3ffe1884 --- /dev/null +++ b/test/lib/CMakeLists.txt @@ -0,0 +1 @@ +add_subdirectory(Analysis) \ No newline at end of file diff --git a/test/lit.cfg.py b/test/lit.cfg.py new file mode 100644 index 000000000000..a570c8e89186 --- /dev/null +++ b/test/lit.cfg.py @@ -0,0 +1,67 @@ +# -*- Python -*- + +import os +import platform +import re +import subprocess +import tempfile + +import lit.formats +import lit.util + +from lit.llvm import llvm_config +from lit.llvm.subst import ToolSubst +from lit.llvm.subst import FindTool + +# Configuration file for the 'lit' test runner + +# name: The name of this test suite +config.name = 'TRITON' + +config.test_format = lit.formats.ShTest(not llvm_config.use_lit_shell) + +# suffixes: A list of file extensions to treat as test files. +config.suffixes = ['.mlir'] + +# test_source_root: The root path where tests are located. +config.test_source_root = os.path.dirname(__file__) + +# test_exec_root: The root path where tests should be run. +config.test_exec_root = os.path.join(config.triton_obj_root, 'test') + +config.substitutions.append(('%PATH%', config.environment['PATH'])) +config.substitutions.append(('%shlibext', config.llvm_shlib_ext)) + +llvm_config.with_system_environment( + ['HOME', 'INCLUDE', 'LIB', 'TMP', 'TEMP']) + +# llvm_config.use_default_substitutions() + +# excludes: A list of directories to exclude from the testsuite. The 'Inputs' +# subdirectories contain auxiliary inputs for various tests in their parent +# directories. +config.excludes = ['Inputs', 'Examples', 'CMakeLists.txt', 'README.txt', 'LICENSE.txt'] + +# test_source_root: The root path where tests are located. +config.test_source_root = os.path.dirname(__file__) + +# test_exec_root: The root path where tests should be run. +config.test_exec_root = os.path.join(config.triton_obj_root, 'test') +config.triton_tools_dir = os.path.join(config.triton_obj_root, 'bin') +config.filecheck_dir = os.path.join(config.triton_obj_root, 'bin', 'FileCheck') +tool_dirs = [config.triton_tools_dir, config.llvm_tools_dir, config.filecheck_dir] + +# Tweak the PATH to include the tools dir. +for d in tool_dirs: + llvm_config.with_environment('PATH', d, append_path=True) +tools = [ + 'triton-opt', + ToolSubst('%PYTHON', config.python_executable, unresolved='ignore'), +] + +llvm_config.add_tool_substitutions(tools, tool_dirs) + +# TODO: what's this? 
+llvm_config.with_environment('PYTHONPATH', [ + os.path.join(config.mlir_binary_dir, 'python_packages', 'triton'), +], append_path=True) diff --git a/test/lit.site.cfg.py.in b/test/lit.site.cfg.py.in new file mode 100644 index 000000000000..1118ed36b343 --- /dev/null +++ b/test/lit.site.cfg.py.in @@ -0,0 +1,23 @@ +@LIT_SITE_CFG_IN_HEADER@ + +import sys + +config.triton_obj_root = "@TRITON_BINARY_DIR@" +config.llvm_src_root = "@LLVM_SOURCE_DIR@" +config.llvm_obj_root = "@LLVM_BINARY_DIR@" +config.llvm_tools_dir = "@LLVM_TOOLS_DIR@" +config.llvm_lib_dir = "@LLVM_LIBS_DIR@" +config.llvm_shlib_dir = "@SHLIBDIR@" +config.llvm_shlib_ext = "@SHLIBEXT@" +config.llvm_exe_ext = "@EXEEXT@" +config.lit_tools_dir = "@LLVM_LIT_TOOLS_DIR@" +config.mlir_binary_dir = "@MLIR_BINARY_DIR@" +config.python_executable = "@Python3_EXECUTABLE@" +config.enable_bindings_python = @MLIR_ENABLE_BINDINGS_PYTHON@ + + +import lit.llvm +lit.llvm.initialize(lit_config, config) + +# Let the main config do the real work +lit_config.load_config(config, "@TRITON_SOURCE_DIR@/test/lit.cfg.py") diff --git a/unittest/Analysis/CMakeLists.txt b/unittest/Analysis/CMakeLists.txt new file mode 100644 index 000000000000..880c8117b162 --- /dev/null +++ b/unittest/Analysis/CMakeLists.txt @@ -0,0 +1,5 @@ +add_triton_ut( + NAME TestTritonAnalysis + SRCS UtilityTest.cpp + LIBS TritonAnalysis +) diff --git a/unittest/Analysis/UtilityTest.cpp b/unittest/Analysis/UtilityTest.cpp new file mode 100644 index 000000000000..2d25a8803807 --- /dev/null +++ b/unittest/Analysis/UtilityTest.cpp @@ -0,0 +1,29 @@ +//===- UtilityTest.cpp - Tests for +// Utility----------------------------------===// +// +//===----------------------------------------------------------------------===// + +#include "triton/Analysis/Utility.h" +#include + +namespace mlir { + +TEST(Analysis, reorder) { + SmallVector shape({10, 20, 30}); + { + SmallVector order({2, 1, 0}); + auto reordered = reorder(shape, order); + EXPECT_EQ(reordered[0], 30); + EXPECT_EQ(reordered[1], 20); + EXPECT_EQ(reordered[2], 10); + } + { + SmallVector order({1, 0, 2}); + auto reordered = reorder(shape, order); + EXPECT_EQ(reordered[0], 20); + EXPECT_EQ(reordered[1], 10); + EXPECT_EQ(reordered[2], 30); + } +} + +} // namespace mlir diff --git a/unittest/CMakeLists.txt b/unittest/CMakeLists.txt new file mode 100644 index 000000000000..dfc47a66e3de --- /dev/null +++ b/unittest/CMakeLists.txt @@ -0,0 +1,29 @@ + +include (${CMAKE_CURRENT_SOURCE_DIR}/googletest.cmake) + +include(GoogleTest) +enable_testing() + +function(add_triton_ut) + set(options) + set(oneValueArgs NAME) + set(multiValueArgs SRCS LIBS) + cmake_parse_arguments(_ "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN}) + add_test(NAME ${__NAME} + COMMAND ${__NAME}) + add_executable( + ${__NAME} + ${__SRCS}) + target_link_libraries( + ${__NAME} + PRIVATE + GTest::gtest_main + gmock + ${__LIBS}) + + gtest_discover_tests(${__NAME}) +endfunction() + +add_subdirectory(Analysis) +add_subdirectory(Conversion) +add_subdirectory(Dialect) diff --git a/unittest/Conversion/CMakeLists.txt b/unittest/Conversion/CMakeLists.txt new file mode 100644 index 000000000000..b543b6c622ef --- /dev/null +++ b/unittest/Conversion/CMakeLists.txt @@ -0,0 +1 @@ +add_subdirectory(TritonGPUToLLVM) diff --git a/unittest/Conversion/TritonGPUToLLVM/CMakeLists.txt b/unittest/Conversion/TritonGPUToLLVM/CMakeLists.txt new file mode 100644 index 000000000000..172cd3bb898c --- /dev/null +++ b/unittest/Conversion/TritonGPUToLLVM/CMakeLists.txt @@ -0,0 +1,5 @@ +add_triton_ut( + 
NAME TestPtxAsmFormat + SRCS PTXAsmFormatTest.cpp + LIBS TritonGPUToLLVM +) diff --git a/unittest/Conversion/TritonGPUToLLVM/PTXAsmFormatTest.cpp b/unittest/Conversion/TritonGPUToLLVM/PTXAsmFormatTest.cpp new file mode 100644 index 000000000000..5703e0ceb1bf --- /dev/null +++ b/unittest/Conversion/TritonGPUToLLVM/PTXAsmFormatTest.cpp @@ -0,0 +1,147 @@ +#include "triton/Conversion/TritonGPUToLLVM/PTXAsmFormat.h" +#include "mlir/Dialect/Arithmetic/IR/Arithmetic.h" +#include "mlir/IR/Builders.h" +#include "triton/Dialect/Triton/IR/Dialect.h" + +#include + +namespace mlir { +namespace triton { +class PTXAsmFormatTest : public ::testing::Test { +protected: + static constexpr int numValues = 4; + + PTXAsmFormatTest() { + ctx.loadDialect(); + + createValues(); + } + + // Creates the test values. + void createValues() { + OpBuilder builder(&ctx); + builder.setInsertionPointToStart(&block); + + // a b1 value for predicate. + v[0] = builder.create(builder.getUnknownLoc(), 1, 1); + for (int i = 0; i < numValues; i++) { + v[i + 1] = + builder.create(builder.getUnknownLoc(), i, 32); + } + } + + MLIRContext ctx; + Block block; + Value v[numValues + 1]; +}; + +TEST_F(PTXAsmFormatTest, basic) { + PTXBuilder builder; + + // Create the operands needed by the instructions in the PTX code. + auto *cst = builder.newConstantOperand(1); + auto *val = builder.newOperand(v[1], "=r"); + + // create an instruction + auto &mov = *builder.create("mov.b16"); + + mov(val, cst).predicate(v[0]); + ASSERT_EQ(builder.dump(), "@$1 mov.b16 $0, 0x1;"); + + auto values = builder.getAllMLIRArgs(); + ASSERT_EQ(values[0], v[1]); // $0 -> v[1] + ASSERT_EQ(values[1], v[0]); // $1 -> v[0] + + auto constraints = builder.getConstraints(); + ASSERT_EQ(constraints, "=r,b"); // $0 -> =r, $1 -> b +} + +TEST_F(PTXAsmFormatTest, complexInstruction) { + using triton::CacheModifier; + using triton::EvictionPolicy; + + PTXBuilder builder; + + int width = 16; + int nWords = 2; + + Value predicateVal = v[0]; + Value addrVal = v[1]; + + auto addr = builder.newAddrOperand(addrVal, "l", 128 /*offset*/); + + bool isVolatile = false; + auto cache = triton::CacheModifier::CA; + auto cachePriority = triton::EvictionPolicy::EVICT_FIRST; + bool hasL2EvictPolicy = true; + + auto &ld = + builder + .create<>("ld") // + ->o("volatile", isVolatile) + .global() + .o("ca", cache == CacheModifier::CA) + .o("cg", cache == CacheModifier::CG) + .o("L1::evict_first", cachePriority == EvictionPolicy::EVICT_FIRST) + .o("L1::evict_last", cachePriority == EvictionPolicy::EVICT_LAST) + .o("L1::cache_hint", hasL2EvictPolicy) + .v(nWords) + .b(width); + + // Link the instruction to operands + ld(addr).predicate(predicateVal); + + EXPECT_EQ( + builder.dump(), + "@$1 ld.global.ca.L1::evict_first.L1::cache_hint.v2.b16 [ $0 + 128 ];"); + auto values = builder.getAllMLIRArgs(); + EXPECT_EQ(values[0], addrVal); // $0 -> predicate + EXPECT_EQ(values[1], predicateVal); // $1 -> addr + EXPECT_EQ(builder.getConstraints(), "l,b"); +} + +TEST_F(PTXAsmFormatTest, MultiLinePTX) { + PTXBuilder builder; + + auto *constVal = builder.newConstantOperand(1); + auto *valVal0 = builder.newOperand(v[1], "=r"); + auto *valVal1 = builder.newOperand(v[2], "=r"); + + auto &mov = *builder.create("mov"); + + mov(valVal0, constVal); + mov(valVal1, constVal); + mov(valVal1, valVal0); + + EXPECT_EQ(builder.dump(), "mov $0, 0x1;\n\t" + "mov $1, 0x1;\n\t" + "mov $1, $0;"); + + auto values = builder.getAllMLIRArgs(); + EXPECT_EQ(values[0], v[1]); // $0 -> v[1] + EXPECT_EQ(values[1], v[2]); // $1 -> v[2] +} + 
+TEST_F(PTXAsmFormatTest, onlyAttachMLIRArgs) {
+  PTXBuilder builder;
+  const char *ptxCode =
+      ".param .b64 param0;\n" // prepare param0 (format string)
+      "st.param.b64 [param0], %0;\n"
+      "st.param.b64 [param0], %1;\n"
+      "st.param.b64 [param0], %2;\n";
+
+  auto &ptxSnippet = *builder.create(ptxCode);
+  auto *opr0 = builder.newOperand(v[0], "r");
+  auto *opr1 = builder.newOperand(v[1], "r");
+  auto *opr2 = builder.newOperand(v[2], "r");
+  ptxSnippet({opr1, opr2, opr0}, true);
+
+  EXPECT_EQ(builder.dump(), ptxCode);
+  ASSERT_EQ(builder.getAllMLIRArgs()[0], v[1]);
+  ASSERT_EQ(builder.getAllMLIRArgs()[1], v[2]);
+  ASSERT_EQ(builder.getAllMLIRArgs()[2], v[0]);
+  ASSERT_EQ(builder.getAllMLIRArgs().size(), 3);
+}
+
+} // namespace triton
+} // namespace mlir
diff --git a/unittest/Dialect/CMakeLists.txt b/unittest/Dialect/CMakeLists.txt
new file mode 100644
index 000000000000..eba47a67ce2f
--- /dev/null
+++ b/unittest/Dialect/CMakeLists.txt
@@ -0,0 +1 @@
+add_subdirectory(TritonGPU)
diff --git a/unittest/Dialect/TritonGPU/CMakeLists.txt b/unittest/Dialect/TritonGPU/CMakeLists.txt
new file mode 100644
index 000000000000..43f9e6801be3
--- /dev/null
+++ b/unittest/Dialect/TritonGPU/CMakeLists.txt
@@ -0,0 +1,6 @@
+
+add_triton_ut(
+  NAME TestSwizzling
+  SRCS SwizzleTest.cpp
+  LIBS TritonGPUIR ${dialect_libs} ${conversion_libs}
+)
\ No newline at end of file
diff --git a/unittest/Dialect/TritonGPU/SwizzleTest.cpp b/unittest/Dialect/TritonGPU/SwizzleTest.cpp
new file mode 100644
index 000000000000..c7dc33d0a8b3
--- /dev/null
+++ b/unittest/Dialect/TritonGPU/SwizzleTest.cpp
@@ -0,0 +1,53 @@
+#include "triton/Dialect/TritonGPU/IR/Dialect.h"
+#include <gtest/gtest.h>
+
+using namespace mlir;
+using mlir::triton::gpu::SharedEncodingAttr;
+
+struct swizzleParams {
+  int vec;
+  int perPhase;
+  int maxPhase;
+};
+
+struct ParamT {
+  std::array<int64_t, 2> shape;
+  int opIdx;
+  int typeWidth;
+  swizzleParams refSwizzle;
+};
+
+class SwizzleDotOperandTestFixture : public ::testing::TestWithParam<ParamT> {
+protected:
+  ParamType param;
+};
+
+TEST_P(SwizzleDotOperandTestFixture, DotOperands) {
+  auto params = GetParam();
+  // init context
+  MLIRContext ctx;
+  ctx.loadDialect<triton::gpu::TritonGPUDialect>();
+  // create encoding
+  auto parent = triton::gpu::MmaEncodingAttr::get(&ctx, 2, 0, {1, 1});
+  auto encoding =
+      triton::gpu::DotOperandEncodingAttr::get(&ctx, params.opIdx, parent);
+
+  // create element type
+  Type eltType = IntegerType::get(&ctx, params.typeWidth);
+  auto layout =
+      SharedEncodingAttr::get(&ctx, encoding, params.shape, {1, 0}, eltType);
+
+  ASSERT_EQ(layout.getVec(), params.refSwizzle.vec);
+  ASSERT_EQ(layout.getPerPhase(), params.refSwizzle.perPhase);
+  ASSERT_EQ(layout.getMaxPhase(), params.refSwizzle.maxPhase);
+}
+
+INSTANTIATE_TEST_SUITE_P(TestDotOperands, SwizzleDotOperandTestFixture,
+                         ::testing::Values(ParamT{{128, 64}, 0, 16, {8, 1, 8}},
+                                           ParamT{{64, 256}, 1, 16, {8, 1, 8}},
+                                           ParamT{{128, 32}, 0, 16, {8, 2, 4}},
+                                           ParamT{{32, 128}, 1, 16, {8, 1, 8}},
+                                           ParamT{{32, 32}, 0, 16, {8, 2, 4}},
+                                           ParamT{{32, 32}, 1, 16, {8, 2, 4}},
+                                           ParamT{{16, 16}, 0, 16, {8, 4, 2}},
+                                           ParamT{{16, 16}, 1, 16, {8, 4, 2}}));
diff --git a/unittest/googletest.cmake b/unittest/googletest.cmake
new file mode 100644
index 000000000000..41d3d4fa4101
--- /dev/null
+++ b/unittest/googletest.cmake
@@ -0,0 +1,23 @@
+include(FetchContent)
+
+set(GOOGLETEST_DIR "" CACHE STRING "Location of local GoogleTest repo to build against")
+
+if(GOOGLETEST_DIR)
+  set(FETCHCONTENT_SOURCE_DIR_GOOGLETEST ${GOOGLETEST_DIR} CACHE STRING "GoogleTest source directory override")
+endif()
+
+FetchContent_Declare(
+  googletest
+  GIT_REPOSITORY https://github.com/google/googletest.git
+  GIT_TAG release-1.12.1
+  )
+
+FetchContent_GetProperties(googletest)
+
+if(NOT googletest_POPULATED)
+  FetchContent_Populate(googletest)
+  if (MSVC)
+    set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
+  endif()
+  add_subdirectory(${googletest_SOURCE_DIR} ${googletest_BINARY_DIR} EXCLUDE_FROM_ALL)
+endif()
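
Note (not part of the patch): the unit tests above are currently the best reference for the new PTXBuilder API. As a quick orientation, here is a minimal sketch that assumes only the calls exercised in PTXAsmFormatTest (newOperand, newConstantOperand, create, predicate, dump); the helper buildPredicatedMov is hypothetical and exists only for illustration.

#include "triton/Conversion/TritonGPUToLLVM/PTXAsmFormat.h"

#include <string>

namespace mlir {
namespace triton {

// Hypothetical helper (illustration only): render a predicated 32-bit move
// into PTX text using the same PTXBuilder calls as the tests above.
std::string buildPredicatedMov(Value dst, Value pred) {
  PTXBuilder builder;
  auto *d = builder.newOperand(dst, "=r");  // becomes $0, "=r" constraint
  auto *c = builder.newConstantOperand(42); // inlined immediate, printed 0x2a
  auto &mov = *builder.create("mov.b32");   // instruction keyword
  mov(d, c).predicate(pred);                // attach operands and the guard
  // By analogy with the `basic` test, this should print
  // "@$1 mov.b32 $0, 0x2a;".
  return builder.dump();
}

} // namespace triton
} // namespace mlir

As in the `basic` test, the explicit register operand becomes $0, the predicate is appended as $1, and the constant is inlined as an immediate rather than taking an argument slot.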