From 59e99381cace79bc9f93ca04f558dd574cb2ba6e Mon Sep 17 00:00:00 2001 From: Samuel Audet Date: Fri, 25 Oct 2024 18:07:28 +0900 Subject: [PATCH] * Upgrade presets for PyTorch 2.5.0 --- CHANGELOG.md | 2 +- README.md | 2 +- platform/pom.xml | 2 +- pytorch/README.md | 6 +- pytorch/cppbuild.sh | 6 +- pytorch/platform/gpu/pom.xml | 2 +- pytorch/platform/pom.xml | 2 +- pytorch/pom.xml | 2 +- pytorch/samples/pom.xml | 4 +- .../pytorch/AcceleratorHooksInterface.java | 4 + .../AdaptiveAvgPool1dImplCloneable.java | 2 +- .../AdaptiveAvgPool2dImplCloneable.java | 2 +- .../AdaptiveAvgPool3dImplCloneable.java | 2 +- ...aptiveLogSoftmaxWithLossImplCloneable.java | 2 +- .../AdaptiveMaxPool1dImplCloneable.java | 2 +- .../AdaptiveMaxPool2dImplCloneable.java | 2 +- .../AdaptiveMaxPool3dImplCloneable.java | 2 +- .../pytorch/AlphaDropoutImplCloneable.java | 2 +- .../java/org/bytedeco/pytorch/AnyModule.java | 2 +- .../java/org/bytedeco/pytorch/Argument.java | 48 +- .../pytorch/AutogradCompilerCall.java | 1 + .../org/bytedeco/pytorch/AutogradContext.java | 1 - .../pytorch/AvgPool1dImplCloneable.java | 2 +- .../pytorch/AvgPool2dImplCloneable.java | 2 +- .../pytorch/AvgPool3dImplCloneable.java | 2 +- .../pytorch/BCELossImplCloneable.java | 2 +- .../BCEWithLogitsLossImplCloneable.java | 2 +- .../java/org/bytedeco/pytorch/BFloat16.java | 6 +- .../pytorch/BatchNorm1dImplCloneable.java | 2 +- .../pytorch/BatchNorm2dImplCloneable.java | 2 +- .../pytorch/BatchNorm3dImplCloneable.java | 2 +- .../pytorch/BilinearImplCloneable.java | 2 +- .../org/bytedeco/pytorch/BuiltinModule.java | 8 +- .../bytedeco/pytorch/CELUImplCloneable.java | 2 +- .../pytorch/CTCLossImplCloneable.java | 2 +- .../bytedeco/pytorch/CUDAHooksInterface.java | 5 +- .../bytedeco/pytorch/ChunkBatchDataset.java | 3 +- .../pytorch/ChunkBatchSharedBatchDataset.java | 3 +- .../ChunkBatchSharedTensorBatchDataset.java | 3 +- .../pytorch/ChunkMapBatchDataset.java | 3 +- .../pytorch/ChunkMapTensorBatchDataset.java | 3 +- .../pytorch/ChunkTensorBatchDataset.java | 3 +- .../java/org/bytedeco/pytorch/ClassType.java | 4 +- .../org/bytedeco/pytorch/CompilationUnit.java | 2 +- .../bytedeco/pytorch/CompiledNodeArgs.java | 2 + .../pytorch/ConstantPad1dImplCloneable.java | 2 +- .../pytorch/ConstantPad2dImplCloneable.java | 2 +- .../pytorch/ConstantPad3dImplCloneable.java | 2 +- .../java/org/bytedeco/pytorch/Context.java | 19 +- .../bytedeco/pytorch/Conv1dImplCloneable.java | 2 +- .../bytedeco/pytorch/Conv2dImplCloneable.java | 2 +- .../bytedeco/pytorch/Conv3dImplCloneable.java | 2 +- .../bytedeco/pytorch/ConvTranspose1dImpl.java | 4 +- .../pytorch/ConvTranspose1dImplCloneable.java | 2 +- .../bytedeco/pytorch/ConvTranspose2dImpl.java | 4 +- .../pytorch/ConvTranspose2dImplCloneable.java | 2 +- .../bytedeco/pytorch/ConvTranspose3dImpl.java | 4 +- .../pytorch/ConvTranspose3dImplCloneable.java | 2 +- .../CosineEmbeddingLossImplCloneable.java | 2 +- .../CosineSimilarityImplCloneable.java | 2 +- .../CrossEntropyLossImplCloneable.java | 2 +- .../pytorch/CrossMapLRN2dImplCloneable.java | 2 +- .../bytedeco/pytorch/DistributedBackend.java | 6 +- .../pytorch/DistributedRandomSampler.java | 2 +- .../pytorch/DistributedSequentialSampler.java | 2 +- .../pytorch/Dropout2dImplCloneable.java | 2 +- .../pytorch/Dropout3dImplCloneable.java | 2 +- .../pytorch/DropoutImplCloneable.java | 2 +- .../bytedeco/pytorch/ELUImplCloneable.java | 2 +- .../pytorch/EmbeddingBagImplCloneable.java | 2 +- .../pytorch/EmbeddingImplCloneable.java | 2 +- .../FeatureAlphaDropoutImplCloneable.java | 2 +- 
.../pytorch/FlattenImplCloneable.java | 2 +- .../bytedeco/pytorch/FoldImplCloneable.java | 2 +- .../FractionalMaxPool2dImplCloneable.java | 2 +- .../FractionalMaxPool3dImplCloneable.java | 2 +- .../pytorch/FunctionCrossMapLRN2d.java | 2 +- .../org/bytedeco/pytorch/FunctionSchema.java | 8 +- .../gen/java/org/bytedeco/pytorch/Future.java | 2 +- .../bytedeco/pytorch/GELUImplCloneable.java | 2 +- .../bytedeco/pytorch/GLUImplCloneable.java | 2 +- .../pytorch/GRUCellImplCloneable.java | 2 +- .../bytedeco/pytorch/GRUImplCloneable.java | 2 +- .../org/bytedeco/pytorch/GenericDict.java | 2 +- .../org/bytedeco/pytorch/GenericList.java | 2 - .../gen/java/org/bytedeco/pytorch/Graph.java | 4 +- .../org/bytedeco/pytorch/GraphExecutor.java | 2 +- .../org/bytedeco/pytorch/GraphFunction.java | 5 +- .../pytorch/GroupNormImplCloneable.java | 2 +- .../bytedeco/pytorch/HIPHooksInterface.java | 6 +- .../gen/java/org/bytedeco/pytorch/Half.java | 2 +- .../pytorch/HardshrinkImplCloneable.java | 2 +- .../pytorch/HardtanhImplCloneable.java | 2 +- .../HingeEmbeddingLossImplCloneable.java | 2 +- .../pytorch/HuberLossImplCloneable.java | 2 +- .../bytedeco/pytorch/IPUHooksInterface.java | 4 +- .../gen/java/org/bytedeco/pytorch/IValue.java | 4 +- .../pytorch/IdentityImplCloneable.java | 2 +- .../org/bytedeco/pytorch/InputArchive.java | 12 +- .../pytorch/InstanceNorm1dImplCloneable.java | 2 +- .../pytorch/InstanceNorm2dImplCloneable.java | 2 +- .../pytorch/InstanceNorm3dImplCloneable.java | 2 +- .../bytedeco/pytorch/JavaBatchDataset.java | 3 +- .../pytorch/JavaStatefulBatchDataset.java | 3 +- .../JavaStatefulTensorBatchDataset.java | 3 +- .../pytorch/JavaStreamBatchDataset.java | 3 +- .../pytorch/JavaStreamTensorBatchDataset.java | 3 +- .../pytorch/JavaTensorBatchDataset.java | 3 +- .../java/org/bytedeco/pytorch/JitModule.java | 10 +- .../pytorch/KLDivLossImplCloneable.java | 2 +- .../bytedeco/pytorch/L1LossImplCloneable.java | 2 +- .../pytorch/LPPool1dImplCloneable.java | 2 +- .../pytorch/LPPool2dImplCloneable.java | 2 +- .../pytorch/LPPool3dImplCloneable.java | 2 +- .../pytorch/LSTMCellImplCloneable.java | 2 +- .../bytedeco/pytorch/LSTMImplCloneable.java | 2 +- .../pytorch/LayerNormImplCloneable.java | 2 +- .../pytorch/LeakyReLUImplCloneable.java | 2 +- .../org/bytedeco/pytorch/LiftedIValueArg.java | 34 + .../bytedeco/pytorch/LiftedIValueArgs.java | 44 + .../bytedeco/pytorch/LinearImplCloneable.java | 2 +- .../LocalResponseNormImplCloneable.java | 2 +- .../pytorch/LogSigmoidImplCloneable.java | 2 +- .../pytorch/LogSoftmaxImplCloneable.java | 2 +- .../bytedeco/pytorch/LongVaryingShape.java | 4 +- .../bytedeco/pytorch/MNISTBatchDataset.java | 3 +- .../pytorch/MNISTMapBatchDataset.java | 3 +- .../bytedeco/pytorch/MPSHooksInterface.java | 3 + .../pytorch/MSELossImplCloneable.java | 2 +- .../bytedeco/pytorch/MTIAHooksInterface.java | 6 + .../MarginRankingLossImplCloneable.java | 2 +- .../org/bytedeco/pytorch/MatchedSchema.java | 2 +- .../pytorch/MaxPool1dImplCloneable.java | 2 +- .../pytorch/MaxPool2dImplCloneable.java | 2 +- .../pytorch/MaxPool3dImplCloneable.java | 2 +- .../org/bytedeco/pytorch/MaxUnpool1dImpl.java | 2 +- .../pytorch/MaxUnpool1dImplCloneable.java | 2 +- .../org/bytedeco/pytorch/MaxUnpool2dImpl.java | 2 +- .../pytorch/MaxUnpool2dImplCloneable.java | 2 +- .../org/bytedeco/pytorch/MaxUnpool3dImpl.java | 2 +- .../pytorch/MaxUnpool3dImplCloneable.java | 2 +- .../java/org/bytedeco/pytorch/MetaBase.java | 46 +- .../bytedeco/pytorch/MishImplCloneable.java | 2 +- .../gen/java/org/bytedeco/pytorch/Module.java | 2 +- 
.../org/bytedeco/pytorch/ModuleDictImpl.java | 2 +- .../pytorch/ModuleDictImplCloneable.java | 2 +- .../org/bytedeco/pytorch/ModuleListImpl.java | 2 +- .../pytorch/ModuleListImplCloneable.java | 2 +- .../MultiLabelMarginLossImplCloneable.java | 2 +- ...MultiLabelSoftMarginLossImplCloneable.java | 2 +- .../pytorch/MultiMarginLossImplCloneable.java | 2 +- .../MultiheadAttentionImplCloneable.java | 2 +- .../pytorch/NLLLossImplCloneable.java | 2 +- .../org/bytedeco/pytorch/NamedTensorMeta.java | 2 +- .../gen/java/org/bytedeco/pytorch/Obj.java | 4 +- .../bytedeco/pytorch/OptimizerParamGroup.java | 1 + .../bytedeco/pytorch/OptionalDeviceGuard.java | 10 +- .../org/bytedeco/pytorch/OptionalType.java | 2 +- .../bytedeco/pytorch/PReLUImplCloneable.java | 2 +- .../PairwiseDistanceImplCloneable.java | 2 +- .../pytorch/ParameterDictImplCloneable.java | 2 +- .../pytorch/ParameterListImplCloneable.java | 2 +- .../java/org/bytedeco/pytorch/Pickler.java | 1 - .../pytorch/PixelShuffleImplCloneable.java | 2 +- .../pytorch/PixelUnshuffleImplCloneable.java | 2 +- .../pytorch/PoissonNLLLossImplCloneable.java | 2 +- .../pytorch/PrivateUse1HooksInterface.java | 6 +- .../bytedeco/pytorch/PyInterpreterVTable.java | 8 +- .../org/bytedeco/pytorch/PyObjectHolder.java | 2 +- .../pytorch/RNNCellImplCloneable.java | 2 +- .../bytedeco/pytorch/RNNImplCloneable.java | 2 +- .../bytedeco/pytorch/RReLUImplCloneable.java | 2 +- .../org/bytedeco/pytorch/RandomSampler.java | 2 +- .../java/org/bytedeco/pytorch/RangeValue.java | 4 +- .../bytedeco/pytorch/ReLU6ImplCloneable.java | 2 +- .../bytedeco/pytorch/ReLUImplCloneable.java | 2 +- .../pytorch/ReflectionPad1dImplCloneable.java | 2 +- .../pytorch/ReflectionPad2dImplCloneable.java | 2 +- .../pytorch/ReflectionPad3dImplCloneable.java | 2 +- .../ReplicationPad1dImplCloneable.java | 2 +- .../ReplicationPad2dImplCloneable.java | 2 +- .../ReplicationPad3dImplCloneable.java | 2 +- .../bytedeco/pytorch/SELUImplCloneable.java | 2 +- .../org/bytedeco/pytorch/SafePyObject.java | 7 +- .../pytorch/SavedTensorDefaultHooks.java | 21 +- .../pytorch/SavedTensorDefaultHooksTLS.java | 5 +- .../gen/java/org/bytedeco/pytorch/Scalar.java | 2 + .../org/bytedeco/pytorch/SequentialImpl.java | 2 +- .../pytorch/SequentialImplCloneable.java | 2 +- .../bytedeco/pytorch/SequentialSampler.java | 2 +- .../bytedeco/pytorch/SiLUImplCloneable.java | 2 +- .../pytorch/SigmoidImplCloneable.java | 2 +- .../gen/java/org/bytedeco/pytorch/Slice.java | 12 +- .../pytorch/SmoothL1LossImplCloneable.java | 2 +- .../pytorch/SoftMarginLossImplCloneable.java | 2 +- .../pytorch/Softmax2dImplCloneable.java | 2 +- .../pytorch/SoftmaxImplCloneable.java | 2 +- .../pytorch/SoftminImplCloneable.java | 2 +- .../pytorch/SoftplusImplCloneable.java | 2 +- .../pytorch/SoftshrinkImplCloneable.java | 2 +- .../pytorch/SoftsignImplCloneable.java | 2 +- .../gen/java/org/bytedeco/pytorch/Source.java | 12 +- .../org/bytedeco/pytorch/StreamSampler.java | 2 +- .../bytedeco/pytorch/StrideVaryingShape.java | 4 +- .../pytorch/StringGenericListDict.java | 2 +- .../java/org/bytedeco/pytorch/StringPair.java | 50 + .../bytedeco/pytorch/StringScalarTypeMap.java | 49 + .../bytedeco/pytorch/StringViewReader.java | 36 + .../bytedeco/pytorch/SwapSavedVariables.java | 2 +- .../java/org/bytedeco/pytorch/SymBool.java | 3 +- .../org/bytedeco/pytorch/SymBoolType.java | 2 +- .../org/bytedeco/pytorch/SymFloatType.java | 2 +- .../pytorch/SymIntOptionalVector.java | 91 + .../java/org/bytedeco/pytorch/SymIntType.java | 2 +- .../java/org/bytedeco/pytorch/SymNode.java | 4 
+- .../bytedeco/pytorch/SymbolicShapeMeta.java | 2 + .../bytedeco/pytorch/TanhImplCloneable.java | 2 +- .../pytorch/TanhshrinkImplCloneable.java | 2 +- .../gen/java/org/bytedeco/pytorch/Tensor.java | 4 +- .../java/org/bytedeco/pytorch/TensorBase.java | 4 +- .../bytedeco/pytorch/TensorBatchDataset.java | 3 +- .../java/org/bytedeco/pytorch/TensorImpl.java | 8 +- .../org/bytedeco/pytorch/TensorIndex.java | 4 +- .../org/bytedeco/pytorch/TensorOptions.java | 12 +- .../bytedeco/pytorch/TensorTensorDict.java | 2 +- .../pytorch/ThresholdImplCloneable.java | 2 +- .../java/org/bytedeco/pytorch/TraceState.java | 10 +- .../TransformerDecoderImplCloneable.java | 2 +- .../TransformerDecoderLayerImplCloneable.java | 2 +- .../TransformerEncoderImplCloneable.java | 2 +- .../TransformerEncoderLayerImplCloneable.java | 2 +- .../pytorch/TransformerImplCloneable.java | 2 +- .../TripletMarginLossImplCloneable.java | 2 +- ...etMarginWithDistanceLossImplCloneable.java | 2 +- .../bytedeco/pytorch/UndefinedTensorImpl.java | 2 + .../pytorch/UnflattenImplCloneable.java | 2 +- .../bytedeco/pytorch/UnfoldImplCloneable.java | 2 +- .../java/org/bytedeco/pytorch/Unpickler.java | 2 + .../pytorch/UpsampleImplCloneable.java | 2 +- .../gen/java/org/bytedeco/pytorch/Work.java | 8 +- .../bytedeco/pytorch/WriteableTensorData.java | 1 - .../bytedeco/pytorch/XPUHooksInterface.java | 6 +- .../pytorch/ZeroPad1dImplCloneable.java | 2 +- .../pytorch/ZeroPad2dImplCloneable.java | 2 +- .../pytorch/ZeroPad3dImplCloneable.java | 2 +- .../pytorch/cuda/AnnotationEntry.java | 49 + .../pytorch/cuda/CTCLossDescriptor.java | 6 + .../bytedeco/pytorch/cuda/CUDAAllocator.java | 6 +- .../bytedeco/pytorch/cuda/DeviceStats.java | 37 +- .../org/bytedeco/pytorch/cuda/MemPool.java | 65 + .../bytedeco/pytorch/cuda/MemPoolContext.java | 54 + .../pytorch/cuda/ShareableHandle.java | 53 + .../bytedeco/pytorch/cuda/SnapshotInfo.java | 1 + .../java/org/bytedeco/pytorch/cuda/Stat.java | 10 +- .../org/bytedeco/pytorch/global/torch.java | 2746 +++++++++-------- .../bytedeco/pytorch/global/torch_cuda.java | 109 +- .../pytorch/cuda/AllocatorTraceTracker.java | 5 +- .../org/bytedeco/pytorch/presets/gloo.java | 8 +- .../org/bytedeco/pytorch/presets/torch.java | 41 +- .../bytedeco/pytorch/presets/torch_cuda.java | 6 +- .../pytorch/presets/torch_cuda_include.h | 3 +- 261 files changed, 2510 insertions(+), 1783 deletions(-) create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LiftedIValueArg.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/LiftedIValueArgs.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StringPair.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StringScalarTypeMap.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/StringViewReader.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/SymIntOptionalVector.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AnnotationEntry.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/MemPool.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/MemPoolContext.java create mode 100644 pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ShareableHandle.java diff --git a/CHANGELOG.md b/CHANGELOG.md index 06e25cc914b..e8cf6dd90de 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -9,7 +9,7 @@ * Build FFmpeg with zimg to enable zscale filter ([pull #1481](https://github.com/bytedeco/javacpp-presets/pull/1481)) * Enable PulseAudio support for FFmpeg on Linux ([pull 
#1472](https://github.com/bytedeco/javacpp-presets/pull/1472)) * Virtualize `btCollisionWorld`, `btOverlapFilterCallback`, `btOverlapCallback` from Bullet Physics SDK ([pull #1475](https://github.com/bytedeco/javacpp-presets/pull/1475)) - * Upgrade presets for OpenCV 4.10.0, FFmpeg 7.1, Spinnaker 4.0.0.116 ([pull #1524](https://github.com/bytedeco/javacpp-presets/pull/1524)), DNNL 3.5.3, OpenBLAS 0.3.28, CMINPACK 1.3.9, GSL 2.8, CPython 3.13.0, NumPy 2.1.2, SciPy 1.14.1, LLVM 19.1.2, LibRaw 0.21.2 ([pull #1520](https://github.com/bytedeco/javacpp-presets/pull/1520)), Tesseract 5.4.1, libffi 3.4.6, CUDA 12.6.0, cuDNN 9.3.0, NCCL 2.22.3, nvCOMP 4.0.0, OpenCL 3.0.16, NVIDIA Video Codec SDK 12.2.72, PyTorch 2.4.0 ([pull #1466](https://github.com/bytedeco/javacpp-presets/pull/1466)), SentencePiece 0.2.0, TensorFlow Lite 2.17.0, TensorRT 10.3.0.26, Triton Inference Server 2.48.0, ONNX 1.17.0, ONNX Runtime 1.19.2, TVM 0.17.0, and their dependencies + * Upgrade presets for OpenCV 4.10.0, FFmpeg 7.1, Spinnaker 4.0.0.116 ([pull #1524](https://github.com/bytedeco/javacpp-presets/pull/1524)), DNNL 3.5.3, OpenBLAS 0.3.28, CMINPACK 1.3.9, GSL 2.8, CPython 3.13.0, NumPy 2.1.2, SciPy 1.14.1, LLVM 19.1.2, LibRaw 0.21.2 ([pull #1520](https://github.com/bytedeco/javacpp-presets/pull/1520)), Tesseract 5.4.1, libffi 3.4.6, CUDA 12.6.0, cuDNN 9.3.0, NCCL 2.22.3, nvCOMP 4.0.0, OpenCL 3.0.16, NVIDIA Video Codec SDK 12.2.72, PyTorch 2.5.0 ([pull #1466](https://github.com/bytedeco/javacpp-presets/pull/1466)), SentencePiece 0.2.0, TensorFlow Lite 2.17.0, TensorRT 10.3.0.26, Triton Inference Server 2.48.0, ONNX 1.17.0, ONNX Runtime 1.19.2, TVM 0.17.0, and their dependencies ### January 29, 2024 version 1.5.10 * Introduce `macosx-arm64` builds for PyTorch ([pull #1463](https://github.com/bytedeco/javacpp-presets/pull/1463)) diff --git a/README.md b/README.md index 6d2c7defd3c..faef55e0e89 100644 --- a/README.md +++ b/README.md @@ -223,7 +223,7 @@ Each child module in turn relies by default on the included [`cppbuild.sh` scrip * NVIDIA Video Codec SDK 12.2.x https://developer.nvidia.com/nvidia-video-codec-sdk * OpenCL 3.0.x https://github.com/KhronosGroup/OpenCL-ICD-Loader * MXNet 1.9.x https://github.com/apache/incubator-mxnet - * PyTorch 2.4.x https://github.com/pytorch/pytorch + * PyTorch 2.5.x https://github.com/pytorch/pytorch * SentencePiece 0.2.0 https://github.com/google/sentencepiece * TensorFlow 1.15.x https://github.com/tensorflow/tensorflow * TensorFlow Lite 2.17.x https://github.com/tensorflow/tensorflow diff --git a/platform/pom.xml b/platform/pom.xml index db4f43e431b..937907d9633 100644 --- a/platform/pom.xml +++ b/platform/pom.xml @@ -292,7 +292,7 @@ <groupId>org.bytedeco</groupId> <artifactId>pytorch-platform</artifactId> - <version>2.4.0-${project.version}</version> + <version>2.5.0-${project.version}</version> <groupId>org.bytedeco</groupId> diff --git a/pytorch/README.md b/pytorch/README.md index e5cccb5525b..80a65ffa627 100644 --- a/pytorch/README.md +++ b/pytorch/README.md @@ -9,7 +9,7 @@ Introduction ------------ This directory contains the JavaCPP Presets module for: - * PyTorch 2.5.0 https://pytorch.org/ + * PyTorch 2.5.0 https://pytorch.org/ Please refer to the parent README.md file for more detailed information about the JavaCPP Presets.
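The dependency snippets in the hunk below pull in the upgraded 2.5.0 artifacts. As a quick way to verify them, a minimal program along these lines can be run against `pytorch-platform`; this is an illustrative sketch rather than repository code (the `CloneCheck` class is hypothetical), and it exercises the `clone()` overloads whose `nullValue` defaults this patch migrates from `c10::nullopt` to `std::nullopt`:

```java
// Hypothetical smoke test for the upgraded presets, not part of this patch.
import org.bytedeco.pytorch.LinearImpl;
import org.bytedeco.pytorch.Module; // disambiguate from java.lang.Module

public class CloneCheck {
    public static void main(String[] args) {
        LinearImpl linear = new LinearImpl(4, 2); // torch::nn::Linear(4, 2)
        // No Device argument: the native default is an empty std::optional,
        // spelled std::nullopt after this upgrade (previously c10::nullopt).
        Module copy = linear.clone();
        System.out.println("cloned: " + copy.name().getString());
    }
}
```

Behavior at the Java level is unchanged by the migration: omitting the `Device` argument still passes an empty `std::optional<torch::Device>` to the native side.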
@@ -48,14 +48,14 @@ We can use [Maven 3](http://maven.apache.org/) to download and install automatic <groupId>org.bytedeco</groupId> <artifactId>pytorch-platform</artifactId> - <version>2.4.0-1.5.11-SNAPSHOT</version> + <version>2.5.0-1.5.11-SNAPSHOT</version> <groupId>org.bytedeco</groupId> <artifactId>pytorch-platform-gpu</artifactId> - <version>2.4.0-1.5.11-SNAPSHOT</version> + <version>2.5.0-1.5.11-SNAPSHOT</version> diff --git a/pytorch/cppbuild.sh b/pytorch/cppbuild.sh index 3512f84a4de..7434bfa9eeb 100755 --- a/pytorch/cppbuild.sh +++ b/pytorch/cppbuild.sh @@ -38,7 +38,7 @@ if [[ $PLATFORM == windows* ]]; then export PYTHON_BIN_PATH=$(which python.exe) fi -PYTORCH_VERSION=2.4.1 +PYTORCH_VERSION=2.5.0 export PYTORCH_BUILD_VERSION="$PYTORCH_VERSION" export PYTORCH_BUILD_NUMBER=1 @@ -129,7 +129,7 @@ mkdir -p "$PYTHON_INSTALL_PATH" export CFLAGS="-I$CPYTHON_PATH/include/ -I$PYTHON_LIB_PATH/include/python/ -L$CPYTHON_PATH/lib/ -L$CPYTHON_PATH/libs/" export PYTHONNOUSERSITE=1 -$PYTHON_BIN_PATH -m pip install --target=$PYTHON_LIB_PATH setuptools==67.6.1 pyyaml==6.0.1 typing_extensions==4.8.0 +$PYTHON_BIN_PATH -m pip install --target=$PYTHON_LIB_PATH setuptools==67.6.1 pyyaml==6.0.2 typing_extensions==4.8.0 case $PLATFORM in linux-x86) @@ -184,6 +184,7 @@ sedinplace 's/ build_deps()/ build_deps(); sys.exit()/g' setup.py sedinplace 's/AND NOT DEFINED ENV{CUDAHOSTCXX}//g' cmake/public/cuda.cmake sedinplace 's/CMAKE_CUDA_FLAGS "/CMAKE_CUDA_FLAGS " --use-local-env /g' CMakeLists.txt +sedinplace '/pycore_opcode.h/d' torch/csrc/dynamo/cpython_defs.c functorch/csrc/dim/dim* sedinplace 's/using ExpandingArrayDouble/public: using ExpandingArrayDouble/g' ./torch/csrc/api/include/torch/nn/options/pooling.h # allow setting the build directory and passing CUDA options @@ -192,6 +193,7 @@ sedinplace 's/var.startswith(("BUILD_", "USE_", "CMAKE_"))/var.startswith(("BUIL # allow resizing std::vector sedinplace 's/TensorIndex(c10::nullopt_t)/TensorIndex(c10::nullopt_t none = None)/g' aten/src/ATen/TensorIndexing.h +sedinplace 's/TensorIndex(std::nullopt_t)/TensorIndex(std::nullopt_t none = None)/g' aten/src/ATen/TensorIndexing.h # add missing declarations sedinplace '/using ExampleType = ExampleType_;/a\ diff --git a/pytorch/platform/gpu/pom.xml b/pytorch/platform/gpu/pom.xml index 387956fdb79..0d8bd29ae81 100644 --- a/pytorch/platform/gpu/pom.xml +++ b/pytorch/platform/gpu/pom.xml @@ -12,7 +12,7 @@ <groupId>org.bytedeco</groupId> <artifactId>pytorch-platform-gpu</artifactId> - <version>2.4.0-${project.parent.version}</version> + <version>2.5.0-${project.parent.version}</version> <name>JavaCPP Presets Platform GPU for PyTorch</name> diff --git a/pytorch/platform/pom.xml b/pytorch/platform/pom.xml index a3ab5725b2d..4ff46efa3fd 100644 --- a/pytorch/platform/pom.xml +++ b/pytorch/platform/pom.xml @@ -12,7 +12,7 @@ <groupId>org.bytedeco</groupId> <artifactId>pytorch-platform</artifactId> - <version>2.4.0-${project.parent.version}</version> + <version>2.5.0-${project.parent.version}</version> <name>JavaCPP Presets Platform for PyTorch</name> diff --git a/pytorch/pom.xml b/pytorch/pom.xml index 3b1a9856d95..5f697e12b53 100644 --- a/pytorch/pom.xml +++ b/pytorch/pom.xml @@ -11,7 +11,7 @@ <groupId>org.bytedeco</groupId> <artifactId>pytorch</artifactId> - <version>2.4.0-${project.parent.version}</version> + <version>2.5.0-${project.parent.version}</version> <name>JavaCPP Presets for PyTorch</name> diff --git a/pytorch/samples/pom.xml b/pytorch/samples/pom.xml index ef136d7088d..1add9a6b25c 100644 --- a/pytorch/samples/pom.xml +++ b/pytorch/samples/pom.xml @@ -12,14 +12,14 @@ <groupId>org.bytedeco</groupId> <artifactId>pytorch-platform</artifactId> - <version>2.4.0-1.5.11-SNAPSHOT</version> + <version>2.5.0-1.5.11-SNAPSHOT</version> <groupId>org.bytedeco</groupId> <artifactId>pytorch-platform-gpu</artifactId> - <version>2.4.0-1.5.11-SNAPSHOT</version> + <version>2.5.0-1.5.11-SNAPSHOT</version> diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AcceleratorHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AcceleratorHooksInterface.java index 9fb904a24ff..7699b290118 100644 ---
a/pytorch/src/gen/java/org/bytedeco/pytorch/AcceleratorHooksInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AcceleratorHooksInterface.java @@ -45,4 +45,8 @@ public class AcceleratorHooksInterface extends Pointer { public native @Cast("c10::DeviceIndex") byte exchangeDevice(@Cast("c10::DeviceIndex") byte device); public native @Cast("c10::DeviceIndex") byte maybeExchangeDevice(@Cast("c10::DeviceIndex") byte device); + + public native @Cast("bool") boolean isPinnedPtr(@Const Pointer data); + + public native Allocator getPinnedMemoryAllocator(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplCloneable.java index 6cd0d41375f..08eb0049aea 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplCloneable.java @@ -33,6 +33,6 @@ public class AdaptiveAvgPool1dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplCloneable.java index 014f7184f1b..39c0ce609aa 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplCloneable.java @@ -33,6 +33,6 @@ public class AdaptiveAvgPool2dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplCloneable.java index 680a8c29567..d35872413fe 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplCloneable.java @@ -33,6 +33,6 @@ public class AdaptiveAvgPool3dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplCloneable.java index a1dfcbd3a2d..8dc7c4a65a1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplCloneable.java @@ -33,6 +33,6 @@ public class AdaptiveLogSoftmaxWithLossImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplCloneable.java index 7182a820e06..96c3dea0bb2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplCloneable.java @@ -33,6 +33,6 @@ public class AdaptiveMaxPool1dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplCloneable.java index 1145fd54eb9..f35a31b6e95 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplCloneable.java @@ -33,6 +33,6 @@ public class AdaptiveMaxPool2dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplCloneable.java index c73ce111401..f42c7694a04 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplCloneable.java @@ -33,6 +33,6 @@ public class AdaptiveMaxPool3dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplCloneable.java index db38d7884c4..3d96932be20 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplCloneable.java @@ -33,6 +33,6 @@ public class AlphaDropoutImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java index 3991d3564c1..89761c5e532 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java @@ -391,7 +391,7 @@ public class AnyModule extends Pointer { /** Creates a deep copy of an {@code AnyModule} if it contains a module, else an * empty {@code AnyModule} if it is empty. */ - public native @ByVal AnyModule clone(@ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + public native @ByVal AnyModule clone(@ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @ByVal AnyModule clone(); /** Assigns a module to the {@code AnyModule} (to circumvent the explicit diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Argument.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Argument.java index 9a807c22489..141bc6808ab 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Argument.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Argument.java @@ -37,50 +37,50 @@ public class Argument extends Pointer { public Argument( @StdString BytePointer name/*=""*/, @Const @ByRef(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type, - @ByVal(nullValue = "std::optional(c10::nullopt)") IntOptional N, - @ByVal(nullValue = "std::optional(c10::nullopt)") IValueOptional default_value, + @ByVal(nullValue = "std::optional(std::nullopt)") IntOptional N, + @ByVal(nullValue = "std::optional(std::nullopt)") IValueOptional default_value, @Cast("bool") boolean kwarg_only/*=false*/, - @ByVal(nullValue = "std::optional(c10::nullopt)") AliasInfoOptional alias_info) { super((Pointer)null); allocate(name, type, N, default_value, kwarg_only, alias_info); } + @ByVal(nullValue = "std::optional(std::nullopt)") AliasInfoOptional alias_info) { super((Pointer)null); allocate(name, type, N, default_value, kwarg_only, alias_info); } private native void allocate( @StdString BytePointer name/*=""*/, @Const @ByRef(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type, - @ByVal(nullValue = "std::optional(c10::nullopt)") IntOptional N, - @ByVal(nullValue = "std::optional(c10::nullopt)") IValueOptional default_value, + @ByVal(nullValue = "std::optional(std::nullopt)") IntOptional N, + @ByVal(nullValue = "std::optional(std::nullopt)") IValueOptional default_value, 
@Cast("bool") boolean kwarg_only/*=false*/, - @ByVal(nullValue = "std::optional(c10::nullopt)") AliasInfoOptional alias_info); + @ByVal(nullValue = "std::optional(std::nullopt)") AliasInfoOptional alias_info); public Argument() { super((Pointer)null); allocate(); } private native void allocate(); public Argument( @StdString String name/*=""*/, @Const @ByRef(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type, - @ByVal(nullValue = "std::optional(c10::nullopt)") IntOptional N, - @ByVal(nullValue = "std::optional(c10::nullopt)") IValueOptional default_value, + @ByVal(nullValue = "std::optional(std::nullopt)") IntOptional N, + @ByVal(nullValue = "std::optional(std::nullopt)") IValueOptional default_value, @Cast("bool") boolean kwarg_only/*=false*/, - @ByVal(nullValue = "std::optional(c10::nullopt)") AliasInfoOptional alias_info) { super((Pointer)null); allocate(name, type, N, default_value, kwarg_only, alias_info); } + @ByVal(nullValue = "std::optional(std::nullopt)") AliasInfoOptional alias_info) { super((Pointer)null); allocate(name, type, N, default_value, kwarg_only, alias_info); } private native void allocate( @StdString String name/*=""*/, @Const @ByRef(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type, - @ByVal(nullValue = "std::optional(c10::nullopt)") IntOptional N, - @ByVal(nullValue = "std::optional(c10::nullopt)") IValueOptional default_value, + @ByVal(nullValue = "std::optional(std::nullopt)") IntOptional N, + @ByVal(nullValue = "std::optional(std::nullopt)") IValueOptional default_value, @Cast("bool") boolean kwarg_only/*=false*/, - @ByVal(nullValue = "std::optional(c10::nullopt)") AliasInfoOptional alias_info); + @ByVal(nullValue = "std::optional(std::nullopt)") AliasInfoOptional alias_info); public Argument( @StdString BytePointer name, @ByVal Type.TypePtr fake_type, @ByVal Type.TypePtr real_type, - @ByVal(nullValue = "std::optional(c10::nullopt)") IntOptional N, - @ByVal(nullValue = "std::optional(c10::nullopt)") IValueOptional default_value, + @ByVal(nullValue = "std::optional(std::nullopt)") IntOptional N, + @ByVal(nullValue = "std::optional(std::nullopt)") IValueOptional default_value, @Cast("bool") boolean kwarg_only/*=false*/, - @ByVal(nullValue = "std::optional(c10::nullopt)") AliasInfoOptional alias_info) { super((Pointer)null); allocate(name, fake_type, real_type, N, default_value, kwarg_only, alias_info); } + @ByVal(nullValue = "std::optional(std::nullopt)") AliasInfoOptional alias_info) { super((Pointer)null); allocate(name, fake_type, real_type, N, default_value, kwarg_only, alias_info); } private native void allocate( @StdString BytePointer name, @ByVal Type.TypePtr fake_type, @ByVal Type.TypePtr real_type, - @ByVal(nullValue = "std::optional(c10::nullopt)") IntOptional N, - @ByVal(nullValue = "std::optional(c10::nullopt)") IValueOptional default_value, + @ByVal(nullValue = "std::optional(std::nullopt)") IntOptional N, + @ByVal(nullValue = "std::optional(std::nullopt)") IValueOptional default_value, @Cast("bool") boolean kwarg_only/*=false*/, - @ByVal(nullValue = "std::optional(c10::nullopt)") AliasInfoOptional alias_info); + @ByVal(nullValue = "std::optional(std::nullopt)") AliasInfoOptional alias_info); public Argument( @StdString BytePointer name, @ByVal Type.TypePtr fake_type, @@ -93,18 +93,18 @@ public Argument( @StdString String name, @ByVal Type.TypePtr fake_type, @ByVal Type.TypePtr real_type, - @ByVal(nullValue = "std::optional(c10::nullopt)") IntOptional N, - @ByVal(nullValue = "std::optional(c10::nullopt)") IValueOptional default_value, + 
@ByVal(nullValue = "std::optional(std::nullopt)") IntOptional N, + @ByVal(nullValue = "std::optional(std::nullopt)") IValueOptional default_value, @Cast("bool") boolean kwarg_only/*=false*/, - @ByVal(nullValue = "std::optional(c10::nullopt)") AliasInfoOptional alias_info) { super((Pointer)null); allocate(name, fake_type, real_type, N, default_value, kwarg_only, alias_info); } + @ByVal(nullValue = "std::optional(std::nullopt)") AliasInfoOptional alias_info) { super((Pointer)null); allocate(name, fake_type, real_type, N, default_value, kwarg_only, alias_info); } private native void allocate( @StdString String name, @ByVal Type.TypePtr fake_type, @ByVal Type.TypePtr real_type, - @ByVal(nullValue = "std::optional(c10::nullopt)") IntOptional N, - @ByVal(nullValue = "std::optional(c10::nullopt)") IValueOptional default_value, + @ByVal(nullValue = "std::optional(std::nullopt)") IntOptional N, + @ByVal(nullValue = "std::optional(std::nullopt)") IValueOptional default_value, @Cast("bool") boolean kwarg_only/*=false*/, - @ByVal(nullValue = "std::optional(c10::nullopt)") AliasInfoOptional alias_info); + @ByVal(nullValue = "std::optional(std::nullopt)") AliasInfoOptional alias_info); public Argument( @StdString String name, @ByVal Type.TypePtr fake_type, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradCompilerCall.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradCompilerCall.java index 6a5e475e9f5..4bbcad83cba 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradCompilerCall.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradCompilerCall.java @@ -43,6 +43,7 @@ public class AutogradCompilerCall extends Pointer { public native @ByRef @NoOffset TensorArgs tensor_args(); public native AutogradCompilerCall tensor_args(TensorArgs setter); public native @StdVector @NoOffset SizeInput all_size_inputs(); public native AutogradCompilerCall all_size_inputs(SizeInput setter); + public native @ByRef LiftedIValueArgs lifted_ivalue_args(); public native AutogradCompilerCall lifted_ivalue_args(LiftedIValueArgs setter); public native @ByRef @Cast("std::vector*") @NoOffset LongVector dyn_size_inputs(); public native AutogradCompilerCall dyn_size_inputs(LongVector setter); public native @ByRef @NoOffset NodeCalls node_calls(); public native AutogradCompilerCall node_calls(NodeCalls setter); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradContext.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradContext.java index 18f6e7a5b8d..67ef2a58781 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradContext.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradContext.java @@ -43,7 +43,6 @@ public class AutogradContext extends Pointer { /** Can be used to save non-variable data for {@code backward}. */ - // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) /** Saves the list of variables for a future call to {@code backward}. This * should be called at most once from inside of {@code forward}. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplCloneable.java index 7dae207aa06..43036e23092 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplCloneable.java @@ -33,6 +33,6 @@ public class AvgPool1dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplCloneable.java index 9426de9839a..b1d2c5868f9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplCloneable.java @@ -33,6 +33,6 @@ public class AvgPool2dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplCloneable.java index 49b36b2077f..ddcc3b31ae6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplCloneable.java @@ -33,6 +33,6 @@ public class AvgPool3dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplCloneable.java index 6c4b5a0eb53..d5659cbb7ed 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplCloneable.java @@ -33,6 +33,6 @@ public class BCELossImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplCloneable.java index b1b8dcbec0d..66ed07f193a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplCloneable.java @@ -33,6 +33,6 @@ public class BCEWithLogitsLossImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16.java index 4a93c34caac..d2bce48148c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16.java @@ -44,12 +44,14 @@ public class BFloat16 extends Pointer { public BFloat16(@Cast("unsigned short") short bits, @ByVal from_bits_t arg1) { super((Pointer)null); allocate(bits, arg1); } private native void allocate(@Cast("unsigned short") short bits, @ByVal from_bits_t arg1); - public BFloat16(float value) { super((Pointer)null); allocate(value); } - private native void allocate(float value); + /* implicit */ public BFloat16(float value) { super((Pointer)null); allocate(value); } +private native void allocate(float value); public native @Name("operator float") float asFloat(); // #if defined(__CUDACC__) && !defined(USE_ROCM) // #endif +// #if defined(__HIPCC__) && defined(USE_ROCM) +// #endif // #if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS) // #endif diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplCloneable.java index a487c9c2454..b71e14f0cd7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplCloneable.java @@ -33,6 +33,6 @@ public class BatchNorm1dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplCloneable.java index e07f2e7eb04..d0918ec9f81 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplCloneable.java @@ -33,6 +33,6 @@ public class BatchNorm2dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplCloneable.java index 0e0c66b1df1..8ab453e2a3c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplCloneable.java @@ -33,6 +33,6 @@ public class BatchNorm3dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplCloneable.java index 7bda99227c6..36424e488c1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplCloneable.java @@ -33,6 +33,6 @@ public class BilinearImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinModule.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinModule.java index bb6afe3ba45..bf470adbd4d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinModule.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinModule.java @@ -25,12 +25,12 @@ public class BuiltinModule extends SugaredValue { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public BuiltinModule(Pointer p) { super(p); } - public BuiltinModule(@StdString BytePointer name, @ByVal(nullValue = "std::optional(at::nullopt)") LongOptional version) { super((Pointer)null); allocate(name, version); } - private native void allocate(@StdString BytePointer name, @ByVal(nullValue = "std::optional(at::nullopt)") LongOptional version); + public BuiltinModule(@StdString BytePointer name, @ByVal(nullValue = "std::optional(std::nullopt)") LongOptional version) { super((Pointer)null); allocate(name, version); } + private native void allocate(@StdString BytePointer name, @ByVal(nullValue = "std::optional(std::nullopt)") LongOptional version); public BuiltinModule(@StdString BytePointer name) { super((Pointer)null); allocate(name); } private native void allocate(@StdString BytePointer name); - public BuiltinModule(@StdString String name, @ByVal(nullValue = "std::optional(at::nullopt)") LongOptional version) { super((Pointer)null); allocate(name, version); } - private native void allocate(@StdString String name, @ByVal(nullValue = "std::optional(at::nullopt)") LongOptional version); + public BuiltinModule(@StdString String name, @ByVal(nullValue = "std::optional(std::nullopt)") LongOptional version) { super((Pointer)null); allocate(name, version); } + private native void allocate(@StdString String name, @ByVal(nullValue = "std::optional(std::nullopt)") LongOptional version); public BuiltinModule(@StdString String name) { super((Pointer)null); allocate(name); } private native void allocate(@StdString String name); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplCloneable.java index c5755d11b03..d6b0878acc0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplCloneable.java @@ -33,6 +33,6 @@ public class CELUImplCloneable extends Module { * and submodules in the cloned module are 
different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplCloneable.java index a10426829c4..e9bbaa3e6d4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplCloneable.java @@ -33,6 +33,6 @@ public class CTCLossImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksInterface.java index 2ce28032170..61b0355be09 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksInterface.java @@ -65,7 +65,7 @@ public class CUDAHooksInterface extends AcceleratorHooksInterface { public native @ByVal Device getDeviceFromPtr(Pointer arg0); - public native @Cast("bool") boolean isPinnedPtr(@Const Pointer arg0); + public native @Cast("bool") boolean isPinnedPtr(@Const Pointer data); public native @Cast("bool") boolean hasCUDA(); @@ -119,5 +119,8 @@ public class CUDAHooksInterface extends AcceleratorHooksInterface { public native int getNumGPUs(); +// #ifdef USE_ROCM +// #endif + public native void deviceSynchronize(@Cast("c10::DeviceIndex") byte arg0); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchDataset.java index 3756cba2e7b..16262b3cd70 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchDataset.java @@ -30,7 +30,8 @@ public class ChunkBatchDataset extends Pointer { /** Returns a batch of data given an index. */ public native @ByVal ExampleVectorOptional get_batch(@Cast("size_t") long request); - /** Returns the size of the dataset, or an empty optional if it is unsized. */ + /** Returns the size of the dataset, or an empty std::optional if it is + * unsized. */ public native @ByVal SizeTOptional size(); /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedBatchDataset.java index 225630df0ab..14e40403032 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedBatchDataset.java @@ -30,7 +30,8 @@ public class ChunkBatchSharedBatchDataset extends Pointer { /** Returns a batch of data given an index. */ public native @ByVal ExampleVectorOptional get_batch(@Cast("size_t") long request); - /** Returns the size of the dataset, or an empty optional if it is unsized. 
*/ + /** Returns the size of the dataset, or an empty std::optional if it is + * unsized. */ public native @ByVal SizeTOptional size(); /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedTensorBatchDataset.java index 25f7ef669d2..b1949c1ee90 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedTensorBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedTensorBatchDataset.java @@ -30,7 +30,8 @@ public class ChunkBatchSharedTensorBatchDataset extends Pointer { /** Returns a batch of data given an index. */ public native @ByVal TensorExampleVectorOptional get_batch(@Cast("size_t") long request); - /** Returns the size of the dataset, or an empty optional if it is unsized. */ + /** Returns the size of the dataset, or an empty std::optional if it is + * unsized. */ public native @ByVal SizeTOptional size(); /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java index 3a8b2770d2f..d6a4b57f3c9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java @@ -31,7 +31,8 @@ public class ChunkMapBatchDataset extends Pointer { public native @ByVal ExampleVector get_batch(@ByVal SizeTArrayRef request); public native @ByVal ExampleVector get_batch(@ByVal @Cast({"size_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("size_t") long... request); - /** Returns the size of the dataset, or an empty optional if it is unsized. */ + /** Returns the size of the dataset, or an empty std::optional if it is + * unsized. */ public native @ByVal SizeTOptional size(); /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorBatchDataset.java index b0e5062563b..2daf765c87e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorBatchDataset.java @@ -31,7 +31,8 @@ public class ChunkMapTensorBatchDataset extends Pointer { public native @ByVal TensorExampleVector get_batch(@ByVal SizeTArrayRef request); public native @ByVal TensorExampleVector get_batch(@ByVal @Cast({"size_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("size_t") long... request); - /** Returns the size of the dataset, or an empty optional if it is unsized. */ + /** Returns the size of the dataset, or an empty std::optional if it is + * unsized. */ public native @ByVal SizeTOptional size(); /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorBatchDataset.java index 81fdaf27545..97882153239 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorBatchDataset.java @@ -30,7 +30,8 @@ public class ChunkTensorBatchDataset extends Pointer { /** Returns a batch of data given an index. 
*/ public native @ByVal TensorExampleVectorOptional get_batch(@Cast("size_t") long request); - /** Returns the size of the dataset, or an empty optional if it is unsized. */ + /** Returns the size of the dataset, or an empty std::optional if it is + * unsized. */ public native @ByVal SizeTOptional size(); /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassType.java index 4118b5e9e37..e3108caf0e5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassType.java @@ -227,10 +227,10 @@ public static class Property extends Pointer { public native @Const @ByRef FunctionVector getForwardPreHooks(); public native void checkForwardPreHookSchema( - int pre_hook_idx, + @Cast("size_t") long pre_hook_idx, @Const @ByRef FunctionSchema pre_hook_schema); public native void checkForwardHookSchema( - int hook_idx, + @Cast("size_t") long hook_idx, @Const @ByRef FunctionSchema hook_schema); public native void addMethod(Function method); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnit.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnit.java index 954bb5cc311..09fe8c60f68 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnit.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnit.java @@ -72,7 +72,7 @@ public enum FunctionType { Method(0), Hook(1), PreHook(2); @Const @ByRef ResolverVector defResolvers, @Const Self self, @Cast("bool") boolean shouldMangle/*=false*/, - @ByVal(nullValue = "std::optional(c10::nullopt)") SizeTOptional operator_set_version); + @ByVal(nullValue = "std::optional(std::nullopt)") SizeTOptional operator_set_version); public native @ByVal FunctionVector define( @Const @ByRef QualifiedNameOptional prefix, @Const @ByRef PropertyVector properties, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CompiledNodeArgs.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CompiledNodeArgs.java index 1e6ede7a4dd..a8fb9170e18 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CompiledNodeArgs.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CompiledNodeArgs.java @@ -29,6 +29,8 @@ public class CompiledNodeArgs extends Pointer { public native void collect(@Const @ByRef Tensor t); public native void collect(@Const @ByRef SymInt t); + public native void collect(@Cast("const c10::ArrayRef*") @ByRef Pointer t, @Cast("bool") boolean is_output); + public native void collect(@Const @ByRef IValue iv, @Cast("bool") boolean nested/*=false*/); public native void collect(@Const @ByRef IValue iv); public native void collect(@Const @ByRef Scalar t); public native void collect(@Const @ByRef TensorOptions t); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplCloneable.java index 6879bed85d7..1b0e82b95b7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplCloneable.java @@ -33,6 +33,6 @@ public class ConstantPad1dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplCloneable.java index 34d9fde521d..7d715830bf8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplCloneable.java @@ -33,6 +33,6 @@ public class ConstantPad2dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplCloneable.java index 35b21d86868..c8b5791727f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplCloneable.java @@ -33,6 +33,6 @@ public class ConstantPad3dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java index 148e6fd68be..0eb75a84e05 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java @@ -39,11 +39,18 @@ public class Context extends Pointer { public native @Const @ByRef Generator defaultGenerator(@ByVal Device device); public native @Const @ByRef AcceleratorHooksInterface getAcceleratorHooksInterface( - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceTypeOptional opt_device_type); + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceTypeOptional opt_device_type); public native @Const @ByRef AcceleratorHooksInterface getAcceleratorHooksInterface(); public native @ByVal Device getDeviceFromPtr(Pointer data, DeviceType device_type); public native @ByVal Device getDeviceFromPtr(Pointer data, @Cast("c10::DeviceType") byte device_type); - public static native @Cast("bool") boolean isPinnedPtr(@Const Pointer data); + public native @Cast("bool") boolean isPinnedPtr( + @Const Pointer data, + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceTypeOptional device_type); + public native @Cast("bool") boolean isPinnedPtr( + @Const Pointer data); + public native Allocator getPinnedMemoryAllocator( + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceTypeOptional device_type); + public native Allocator getPinnedMemoryAllocator(); public static native @Cast("bool") boolean hasOpenMP(); public static native @Cast("bool") boolean hasMKL(); public static 
native @Cast("bool") boolean hasLAPACK(); @@ -89,6 +96,8 @@ public class Context extends Pointer { public native void setBenchmarkLimitCuDNN(int arg0); public native @Cast("bool") boolean deterministicCuDNN(); public native void setDeterministicCuDNN(@Cast("bool") boolean arg0); + public native @Cast("bool") boolean deterministicMkldnn(); + public native void setDeterministicMkldnn(@Cast("bool") boolean arg0); public native @Cast("bool") boolean userEnabledNNPACK(); public native void setUserEnabledNNPACK(@Cast("bool") boolean e); @@ -115,6 +124,12 @@ public class Context extends Pointer { public native void setSDPUseCuDNN(@Cast("bool") boolean arg0); public native @Cast("bool") boolean userEnabledCuDNNSDP(); + public native void setAllowFP16BF16ReductionMathSDP(@Cast("bool") boolean arg0); + public native @Cast("bool") boolean allowFP16BF16ReductionMathSDP(); + + public native void setSDPUseOverrideable(@Cast("bool") boolean arg0); + public native @Cast("bool") boolean userEnabledOverrideableSDP(); + public native LinalgBackend linalgPreferredBackend(); public native void setLinalgPreferredBackend(LinalgBackend arg0); public native void setLinalgPreferredBackend(@Cast("at::LinalgBackend") byte arg0); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplCloneable.java index c074380c569..887123cd398 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplCloneable.java @@ -33,6 +33,6 @@ public class Conv1dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplCloneable.java index fddb42dce8a..17f5d769968 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplCloneable.java @@ -33,6 +33,6 @@ public class Conv2dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplCloneable.java index 81aa2a34c37..ab62d30f438 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplCloneable.java @@ -33,6 +33,6 @@ public class Conv3dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImpl.java index 0d180745418..da8b16ca85a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImpl.java @@ -52,10 +52,10 @@ public ConvTranspose1dImpl( @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal ConvTranspose1dOptions options_); public native @ByVal Tensor forward( @Const @ByRef Tensor input, - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") LongArrayRefOptional output_size); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") LongArrayRefOptional output_size); public native @ByVal Tensor forward( @Const @ByRef Tensor input); public native @ByVal Tensor forward( @Const @ByRef Tensor input, - @ByRef(nullValue = "std::optional(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); + @ByRef(nullValue = "std::optional(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplCloneable.java index fab0daed646..8cf46437e02 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplCloneable.java @@ -33,6 +33,6 @@ public class ConvTranspose1dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImpl.java index e00009621c0..65f9b0eee95 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImpl.java @@ -52,10 +52,10 @@ public ConvTranspose2dImpl( @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal ConvTranspose2dOptions options_); public native @ByVal Tensor forward( @Const @ByRef Tensor input, - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") LongArrayRefOptional output_size); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") LongArrayRefOptional output_size); public native @ByVal Tensor forward( @Const @ByRef Tensor input); public native @ByVal Tensor forward( @Const @ByRef Tensor input, - @ByRef(nullValue = "std::optional(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); + @ByRef(nullValue = "std::optional(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
output_size); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplCloneable.java index 47ec0dc5f2a..814903c8188 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplCloneable.java @@ -33,6 +33,6 @@ public class ConvTranspose2dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImpl.java index 6eb214538c3..9b4e1ca1437 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImpl.java @@ -52,10 +52,10 @@ public ConvTranspose3dImpl( @SharedPtr @Name("std::make_shared") private native void allocate(@ByVal ConvTranspose3dOptions options_); public native @ByVal Tensor forward( @Const @ByRef Tensor input, - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") LongArrayRefOptional output_size); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") LongArrayRefOptional output_size); public native @ByVal Tensor forward( @Const @ByRef Tensor input); public native @ByVal Tensor forward( @Const @ByRef Tensor input, - @ByRef(nullValue = "std::optional(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); + @ByRef(nullValue = "std::optional(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplCloneable.java index 15f840cae2e..cd1e70c1989 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose3dImplCloneable.java @@ -33,6 +33,6 @@ public class ConvTranspose3dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImplCloneable.java index 51c7b9676c1..69d461b1b55 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineEmbeddingLossImplCloneable.java @@ -33,6 +33,6 @@ public class CosineEmbeddingLossImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
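The dozens of *ImplCloneable hunks in this patch are all the same mechanical change: the default argument of clone() is respelled from c10::nullopt to std::nullopt, with no behavioral difference. As the surrounding Javadoc says, clone() recursively copies parameters, buffers, and submodules, optionally onto a given device. A short sketch, assuming DeviceOptional has the value constructor these presets usually generate for optional types:

    import org.bytedeco.pytorch.*;

    public class CloneSketch {
        public static void main(String[] args) {
            LinearImpl net = new LinearImpl(4, 2);
            Module copy = net.clone();  // fresh parameters, original devices
            Module onCpu = net.clone(new DeviceOptional(new Device("cpu")));
            System.out.println(copy.parameters().size());
        }
    }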
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImplCloneable.java index a4ca54aa408..a84940a5c31 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CosineSimilarityImplCloneable.java @@ -33,6 +33,6 @@ public class CosineSimilarityImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImplCloneable.java index 5681332dba9..61709c87a6e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossEntropyLossImplCloneable.java @@ -33,6 +33,6 @@ public class CrossEntropyLossImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImplCloneable.java index 6142160122d..c8690fb4c1e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CrossMapLRN2dImplCloneable.java @@ -33,6 +33,6 @@ public class CrossMapLRN2dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedBackend.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedBackend.java index 2087980d594..edeef500c1d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedBackend.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedBackend.java @@ -259,10 +259,10 @@ public native void registerOnCompletionHook( public native @Cast("bool") boolean hasHooks(); // Do not call this directly, use ProcessGroup::setGroupName instead. 
- public native void setGroupName(@StdString BytePointer name); - public native void setGroupName(@StdString String name); + public native void setGroupUid(@StdString BytePointer pg_uid); + public native void setGroupUid(@StdString String pg_uid); - public native @StdString BytePointer getGroupName(); + public native @StdString BytePointer getGroupUid(); public native void setGroupDesc(@StdString BytePointer desc); public native void setGroupDesc(@StdString String desc); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedRandomSampler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedRandomSampler.java index d20c7d8db7a..7d1f33ff5fc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedRandomSampler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedRandomSampler.java @@ -43,7 +43,7 @@ private native void allocate( @Cast("size_t") long size); /** Resets the {@code DistributedRandomSampler} to a new set of indices. */ - public native void reset(@ByVal(nullValue = "std::optional(c10::nullopt)") SizeTOptional new_size); + public native void reset(@ByVal(nullValue = "std::optional(std::nullopt)") SizeTOptional new_size); public native void reset(); /** Returns the next batch of indices. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedSequentialSampler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedSequentialSampler.java index 0c175158825..4ec8f3ce8ef 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedSequentialSampler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DistributedSequentialSampler.java @@ -42,7 +42,7 @@ private native void allocate( @Cast("size_t") long size); /** Resets the {@code DistributedSequentialSampler} to a new set of indices. */ - public native void reset(@ByVal(nullValue = "std::optional(c10::nullopt)") SizeTOptional new_size); + public native void reset(@ByVal(nullValue = "std::optional(std::nullopt)") SizeTOptional new_size); public native void reset(); /** Returns the next batch of indices. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplCloneable.java index 6069088dde1..ebdea8ac907 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout2dImplCloneable.java @@ -33,6 +33,6 @@ public class Dropout2dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplCloneable.java index e45b7160813..ca8ac329608 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Dropout3dImplCloneable.java @@ -33,6 +33,6 @@ public class Dropout3dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
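The DistributedBackend hunk above is an API rename, not a respelling: setGroupName()/getGroupName() become setGroupUid()/getGroupUid(), so any Java caller using the 2.4.x names must be updated; the comment's advice to go through ProcessGroup::setGroupName still stands. A sketch of the migration, with "train_pg" as a placeholder identifier:

    import org.bytedeco.pytorch.DistributedBackend;

    public class GroupUidMigration {
        // 2.4.x used backend.setGroupName("train_pg"); in 2.5.0 the
        // uid-based accessors replace the name-based ones.
        static String tag(DistributedBackend backend) {
            backend.setGroupUid("train_pg");
            return backend.getGroupUid().getString();
        }
    }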
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplCloneable.java index 4a0a0ebfea8..9ac64515924 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/DropoutImplCloneable.java @@ -33,6 +33,6 @@ public class DropoutImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImplCloneable.java index 4bf29a55373..e0ee274e3f6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ELUImplCloneable.java @@ -33,6 +33,6 @@ public class ELUImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImplCloneable.java index c94ed7aaf50..29128c11376 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingBagImplCloneable.java @@ -33,6 +33,6 @@ public class EmbeddingBagImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImplCloneable.java index 9d051b7ccef..72eb6424c5a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/EmbeddingImplCloneable.java @@ -33,6 +33,6 @@ public class EmbeddingImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplCloneable.java index fdc119d70d6..54063936189 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FeatureAlphaDropoutImplCloneable.java @@ -33,6 +33,6 @@ public class FeatureAlphaDropoutImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImplCloneable.java index ba5fecfc3fc..5eac88e01ab 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FlattenImplCloneable.java @@ -33,6 +33,6 @@ public class FlattenImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImplCloneable.java index 6f9386cd223..3130a900b55 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FoldImplCloneable.java @@ -33,6 +33,6 @@ public class FoldImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImplCloneable.java index bac58df5b66..8f80bd2860e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool2dImplCloneable.java @@ -33,6 +33,6 @@ public class FractionalMaxPool2dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImplCloneable.java index 024f8a373f8..d09ad17105f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FractionalMaxPool3dImplCloneable.java @@ -33,6 +33,6 @@ public class FractionalMaxPool3dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionCrossMapLRN2d.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionCrossMapLRN2d.java index a7d27c6917c..5c3e413ba04 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionCrossMapLRN2d.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionCrossMapLRN2d.java @@ -56,7 +56,7 @@ * static variable_list forward(AutogradContext *ctx, int n, Variable var) { * // Save data for backward in context * ctx->saved_data["n"] = n; - * var.mul_(2); + * var.mul_(n); * // Mark var as modified by inplace operation * ctx->mark_dirty({var}); * return {var}; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchema.java b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchema.java index cd220694cc9..05bb778a862 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchema.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/FunctionSchema.java @@ -230,16 +230,16 @@ public FunctionSchema( public native @StdString BytePointer formatTypeMismatchMsg( @Const @ByRef Argument expected, @StdString BytePointer actual_type, - @ByVal(nullValue = "std::optional(c10::nullopt)") SizeTOptional _position, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringOptional value); + @ByVal(nullValue = "std::optional(std::nullopt)") SizeTOptional _position, + @ByVal(nullValue = "std::optional(std::nullopt)") StringOptional value); public native @StdString BytePointer formatTypeMismatchMsg( @Const @ByRef Argument expected, @StdString BytePointer actual_type); public native @StdString String formatTypeMismatchMsg( @Const @ByRef Argument expected, @StdString String actual_type, - @ByVal(nullValue = "std::optional(c10::nullopt)") SizeTOptional _position, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringOptional value); + @ByVal(nullValue = "std::optional(std::nullopt)") SizeTOptional _position, + @ByVal(nullValue = "std::optional(std::nullopt)") StringOptional value); public native @StdString String formatTypeMismatchMsg( @Const @ByRef Argument expected, @StdString String actual_type); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Future.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Future.java index 6ab36a9fcc0..b3634b6a1b5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Future.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Future.java @@ -54,7 +54,7 
@@ public class Future extends Pointer { */ public native void markCompleted( @ByVal IValue value, - @ByVal(nullValue = "std::optional > >(c10::nullopt)") WeakStorageVectorOptional storages); + @ByVal(nullValue = "std::optional > >(std::nullopt)") WeakStorageVectorOptional storages); public native void markCompleted( @ByVal IValue value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImplCloneable.java index 205c9c28a55..6d9fb4d436d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GELUImplCloneable.java @@ -33,6 +33,6 @@ public class GELUImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImplCloneable.java index d15a9829227..1c1af5bb3dd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GLUImplCloneable.java @@ -33,6 +33,6 @@ public class GLUImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplCloneable.java index 1900809c054..bb4008fef44 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUCellImplCloneable.java @@ -33,6 +33,6 @@ public class GRUCellImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplCloneable.java index 0961973ff5d..b83b30a5e92 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GRUImplCloneable.java @@ -33,6 +33,6 @@ public class GRUImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDict.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDict.java index 620a92bde70..c4cd32f5415 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDict.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericDict.java @@ -160,7 +160,7 @@ public class GenericDict extends Pointer { public native @Cast("bool") boolean is(@Const @ByRef GenericDict rhs); // private API for now because the return type will change to TypePtr - // instead of optional once types are mandatory. + // instead of std::optional once types are mandatory. public native @ByVal Type.TypePtr keyType(); public native @ByVal Type.TypePtr valueType(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GenericList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericList.java index 28f562288d7..92a671ea699 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GenericList.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GenericList.java @@ -228,6 +228,4 @@ public class GenericList extends Pointer { // See [unsafe set type] for why this exists. public native void unsafeSetElementType(@ByVal Type.TypePtr t); - private static native @Namespace @Const @Name("c10::impl::ptr_to_first_element") IValue ptr_to_first_element(@Const @ByRef GenericList list); - public IValue ptr_to_first_element() { return ptr_to_first_element(this); } } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Graph.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Graph.java index 88709059cef..6ca933d1b8a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Graph.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Graph.java @@ -174,8 +174,8 @@ public native JitNode createClone( // Insert constant IValue into the graph. public native Value insertConstant( @Const @ByRef IValue val, - @ByVal(nullValue = "std::optional(c10::nullopt)") SourceRangeOptional loc, - @ByVal(nullValue = "std::optional(c10::nullopt)") @Cast("std::optional*") ScopeOptional scope); + @ByVal(nullValue = "std::optional(std::nullopt)") SourceRangeOptional loc, + @ByVal(nullValue = "std::optional(std::nullopt)") @Cast("std::optional*") ScopeOptional scope); public native Value insertConstant( @Const @ByRef IValue val); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutor.java index bc03282ad5f..d6294719eee 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphExecutor.java @@ -77,7 +77,7 @@ private native void allocate( // current global fusion strategy settings. 
public native @Const @ByRef ExecutionPlan getPlanFor( @ByRef IValueVector inputs, - @ByVal(nullValue = "std::optional(c10::nullopt)") SizeTOptional remaining_bailout_depth); + @ByVal(nullValue = "std::optional(std::nullopt)") SizeTOptional remaining_bailout_depth); public native @Const @ByRef ExecutionPlan getPlanFor( @ByRef IValueVector inputs); public native @ByVal GraphExecutorState getDebugState(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphFunction.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphFunction.java index dcfd1622b2f..5501ee26907 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GraphFunction.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GraphFunction.java @@ -25,17 +25,16 @@ public class GraphFunction extends Function { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ public GraphFunction(Pointer p) { super(p); } - // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) public GraphFunction( @ByVal QualifiedName name, @SharedPtr("torch::jit::Graph") @ByVal Graph graph, @ByVal GraphFunctionCreator function_creator, - @ByVal(nullValue = "std::optional(c10::nullopt)") ExecutorExecutionModeOptional executor_execution_mode) { super((Pointer)null); allocate(name, graph, function_creator, executor_execution_mode); } + @ByVal(nullValue = "std::optional(std::nullopt)") ExecutorExecutionModeOptional executor_execution_mode) { super((Pointer)null); allocate(name, graph, function_creator, executor_execution_mode); } private native void allocate( @ByVal QualifiedName name, @SharedPtr("torch::jit::Graph") @ByVal Graph graph, @ByVal GraphFunctionCreator function_creator, - @ByVal(nullValue = "std::optional(c10::nullopt)") ExecutorExecutionModeOptional executor_execution_mode); + @ByVal(nullValue = "std::optional(std::nullopt)") ExecutorExecutionModeOptional executor_execution_mode); public GraphFunction( @ByVal QualifiedName name, @SharedPtr("torch::jit::Graph") @ByVal Graph graph, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImplCloneable.java index 78c3d08255c..eb5d6ea8078 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/GroupNormImplCloneable.java @@ -33,6 +33,6 @@ public class GroupNormImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HIPHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HIPHooksInterface.java index 8da8d17f9c6..2b33aa12303 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HIPHooksInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HIPHooksInterface.java @@ -24,7 +24,7 @@ // dispatched, to allow for separate compilation of HIP code). See // CUDAHooksInterface for more detailed motivation. @Namespace("at") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class HIPHooksInterface extends Pointer { +public class HIPHooksInterface extends AcceleratorHooksInterface { static { Loader.load(); } /** Default native constructor. 
*/ public HIPHooksInterface() { super((Pointer)null); allocate(); } @@ -53,9 +53,13 @@ public class HIPHooksInterface extends Pointer { public native @Cast("c10::DeviceIndex") byte current_device(); + public native @Cast("bool") boolean isPinnedPtr(@Const Pointer data); + public native Allocator getPinnedMemoryAllocator(); public native void registerHIPTypes(Context arg0); public native int getNumGPUs(); + + public native @Cast("bool") boolean hasPrimaryContext(@Cast("c10::DeviceIndex") byte device_index); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Half.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Half.java index 42213b00ffe..5689b1f562f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Half.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Half.java @@ -44,7 +44,7 @@ public class Half extends Pointer { public Half(@Cast("unsigned short") short bits, @ByVal from_bits_t arg1) { super((Pointer)null); allocate(bits, arg1); } private native void allocate(@Cast("unsigned short") short bits, @ByVal from_bits_t arg1); -// #if defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__) +// #if defined(__aarch64__) && !defined(__CUDACC__) // #else public Half(float value) { super((Pointer)null); allocate(value); } private native void allocate(float value); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImplCloneable.java index 426499c46c4..a5bcfee780a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HardshrinkImplCloneable.java @@ -33,6 +33,6 @@ public class HardshrinkImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImplCloneable.java index 4989af3c526..ccacda645f0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HardtanhImplCloneable.java @@ -33,6 +33,6 @@ public class HardtanhImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImplCloneable.java index 38ac8b2ff47..ec34852cd66 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HingeEmbeddingLossImplCloneable.java @@ -33,6 +33,6 @@ public class HingeEmbeddingLossImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImplCloneable.java index 076cd6e982d..792ebde777f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/HuberLossImplCloneable.java @@ -33,6 +33,6 @@ public class HuberLossImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IPUHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IPUHooksInterface.java index 83cbe8f2edf..b0ff5e80d34 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IPUHooksInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IPUHooksInterface.java @@ -39,9 +39,9 @@ public class IPUHooksInterface extends Pointer { public native @Const @ByRef Generator getDefaultIPUGenerator( - @Cast("c10::DeviceIndex") byte device_index/*=-1*/); + @Cast("c10::DeviceIndex") byte device_index/*[[maybe_unused]] = -1*/); public native @Const @ByRef Generator getDefaultIPUGenerator(); - public native @ByVal Generator newIPUGenerator(@Cast("c10::DeviceIndex") byte device_index/*=-1*/); + public native @ByVal Generator newIPUGenerator(@Cast("c10::DeviceIndex") byte device_index/*[[maybe_unused]] = -1*/); public native @ByVal Generator newIPUGenerator(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java index 3e8da1cd5ca..9d88e6b219c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IValue.java @@ -549,11 +549,11 @@ public static class CompIdentityIValues extends Pointer { // TODO: There are several places that recurse over IValue. This is fragile. // This visitor should be used to recurse over ivalues. 
- public native @ByVal IValue deepcopy(@ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + public native @ByVal IValue deepcopy(@ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @ByVal IValue deepcopy(); public native @ByVal IValue deepcopy( @ByRef HashIdentityIValueMap memo, - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @ByVal IValue deepcopy( @ByRef HashIdentityIValueMap memo); // Don't edit this just to add results for new tags; edit diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImplCloneable.java index 6dfe9b10f23..023eb5322f7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/IdentityImplCloneable.java @@ -33,6 +33,6 @@ public class IdentityImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InputArchive.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InputArchive.java index 13d0b08664b..8b649b67251 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InputArchive.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InputArchive.java @@ -93,12 +93,12 @@ public class InputArchive extends Pointer { * is not specified, the module is loaded to the original device. */ public native void load_from( @StdString BytePointer filename, - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native void load_from( @StdString BytePointer filename); public native void load_from( @StdString String filename, - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native void load_from( @StdString String filename); @@ -107,7 +107,7 @@ public native void load_from( * is not specified, the module is loaded to the original device. 
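Beyond the nullopt respelling, the IValue.deepcopy() overloads above keep their shape: calling with no argument copies onto the original device, a DeviceOptional redirects the copy, and the HashIdentityIValueMap overloads preserve aliasing across a recursive copy. A sketch, assuming torch.ones(long...) and the DeviceOptional value constructor behave as in earlier releases of these presets:

    import org.bytedeco.pytorch.*;
    import static org.bytedeco.pytorch.global.torch.ones;

    public class DeepcopySketch {
        public static void main(String[] args) {
            IValue iv = new IValue(ones(2, 2));
            IValue sameDevice = iv.deepcopy();  // empty optional: keep device
            IValue onCpu = iv.deepcopy(new DeviceOptional(new Device("cpu")));
        }
    }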
*/ public native void load_from( @Cast("std::istream*") @ByRef Pointer stream, - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native void load_from( @Cast("std::istream*") @ByRef Pointer stream); @@ -115,14 +115,14 @@ public native void load_from( public native void load_from( @Cast("const char*") BytePointer data, @Cast("size_t") long size, - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native void load_from( @Cast("const char*") BytePointer data, @Cast("size_t") long size); public native void load_from( String data, @Cast("size_t") long size, - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native void load_from( String data, @Cast("size_t") long size); @@ -131,7 +131,7 @@ public native void load_from( public native void load_from( @Const @ByRef Reader read_func, @Const @ByRef SizeTSupplier size_func, - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native void load_from( @Const @ByRef Reader read_func, @Const @ByRef SizeTSupplier size_func); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplCloneable.java index a32a79ac70c..b2fb2df1d12 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm1dImplCloneable.java @@ -33,6 +33,6 @@ public class InstanceNorm1dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplCloneable.java index 6d1e5157004..5eafaa7f59a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm2dImplCloneable.java @@ -33,6 +33,6 @@ public class InstanceNorm2dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
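The InputArchive.load_from() family above follows the same pattern across all five sources (path, stream, raw buffer, and reader/size functions): the trailing DeviceOptional defaults to an empty std::optional, meaning the archive is loaded to the device it was saved from. A sketch with a placeholder path, again assuming the DeviceOptional value constructor:

    import org.bytedeco.pytorch.*;

    public class ArchiveLoadSketch {
        public static void main(String[] args) {
            InputArchive archive = new InputArchive();
            archive.load_from("model.pt");  // placeholder path; original device
            archive.load_from("model.pt", new DeviceOptional(new Device("cpu")));
        }
    }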
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplCloneable.java index c3ee4a07aed..4b2edc06d9f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/InstanceNorm3dImplCloneable.java @@ -33,6 +33,6 @@ public class InstanceNorm3dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaBatchDataset.java index 8da6ce3e079..608ce2c4546 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaBatchDataset.java @@ -31,7 +31,8 @@ public class JavaBatchDataset extends Pointer { public native @ByVal ExampleVector get_batch(@ByVal SizeTArrayRef request); public native @ByVal ExampleVector get_batch(@ByVal @Cast({"size_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("size_t") long... request); - /** Returns the size of the dataset, or an empty optional if it is unsized. */ + /** Returns the size of the dataset, or an empty std::optional if it is + * unsized. */ public native @ByVal SizeTOptional size(); /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulBatchDataset.java index 9b12839763f..4664fb1810f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulBatchDataset.java @@ -30,7 +30,8 @@ public class JavaStatefulBatchDataset extends Pointer { /** Returns a batch of data given an index. */ public native @ByVal ExampleVectorOptional get_batch(@Cast("size_t") long request); - /** Returns the size of the dataset, or an empty optional if it is unsized. */ + /** Returns the size of the dataset, or an empty std::optional if it is + * unsized. */ public native @ByVal SizeTOptional size(); /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorBatchDataset.java index 33b5d0556b5..839c619ddeb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStatefulTensorBatchDataset.java @@ -30,7 +30,8 @@ public class JavaStatefulTensorBatchDataset extends Pointer { /** Returns a batch of data given an index. 
*/ public native @ByVal TensorExampleVectorOptional get_batch(@Cast("size_t") long request); - /** Returns the size of the dataset, or an empty optional if it is unsized. */ + /** Returns the size of the dataset, or an empty std::optional if it is + * unsized. */ public native @ByVal SizeTOptional size(); /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamBatchDataset.java index 851ed1e25c0..7af4fb9d0a3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamBatchDataset.java @@ -30,7 +30,8 @@ public class JavaStreamBatchDataset extends Pointer { /** Returns a batch of data given an index. */ public native @ByVal ExampleVector get_batch(@Cast("size_t") long request); - /** Returns the size of the dataset, or an empty optional if it is unsized. */ + /** Returns the size of the dataset, or an empty std::optional if it is + * unsized. */ public native @ByVal SizeTOptional size(); /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorBatchDataset.java index a1f32e91bf0..4d55b361f39 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaStreamTensorBatchDataset.java @@ -30,7 +30,8 @@ public class JavaStreamTensorBatchDataset extends Pointer { /** Returns a batch of data given an index. */ public native @ByVal TensorExampleVector get_batch(@Cast("size_t") long request); - /** Returns the size of the dataset, or an empty optional if it is unsized. */ + /** Returns the size of the dataset, or an empty std::optional if it is + * unsized. */ public native @ByVal SizeTOptional size(); /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorBatchDataset.java index a2dd76a09b5..6c496c8de86 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JavaTensorBatchDataset.java @@ -31,7 +31,8 @@ public class JavaTensorBatchDataset extends Pointer { public native @ByVal TensorExampleVector get_batch(@ByVal SizeTArrayRef request); public native @ByVal TensorExampleVector get_batch(@ByVal @Cast({"size_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("size_t") long... request); - /** Returns the size of the dataset, or an empty optional if it is unsized. */ + /** Returns the size of the dataset, or an empty std::optional if it is + * unsized. */ public native @ByVal SizeTOptional size(); /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. 
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/JitModule.java b/pytorch/src/gen/java/org/bytedeco/pytorch/JitModule.java index d94b8110966..8975fd0c437 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/JitModule.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/JitModule.java @@ -225,7 +225,7 @@ public native void _save_for_mobile( public native @ByVal JitModule copy(); - public native @ByVal JitModule deepcopy(@ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + public native @ByVal JitModule deepcopy(@ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @ByVal JitModule deepcopy(); // Clones both the underlying `ClassType` and the module instance(data), this @@ -261,8 +261,12 @@ public native void _save_for_mobile( // A set of functions to maintain input shapes through torch.jit.save and // torch.jit.load. It only works on tensors and lists/dicts of tensors // because tracing is only supported by these types. - public native void store_traced_inputs(@StdString BytePointer func_name, @ByVal IValueVector inputs); - public native void store_traced_inputs(@StdString String func_name, @ByVal IValueVector inputs); + public native void store_traced_inputs( + @StdString BytePointer func_name, + @ByVal IValueVector inputs); + public native void store_traced_inputs( + @StdString String func_name, + @ByVal IValueVector inputs); public native @ByVal StringGenericListDict retrieve_traced_inputs(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImplCloneable.java index 2b462bda9ff..d6402733e4b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/KLDivLossImplCloneable.java @@ -33,6 +33,6 @@ public class KLDivLossImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImplCloneable.java index 942f23243e0..d81f9b82564 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/L1LossImplCloneable.java @@ -33,6 +33,6 @@ public class L1LossImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplCloneable.java index 6f9c31f27bd..982ea8b288b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool1dImplCloneable.java @@ -33,6 +33,6 @@ public class LPPool1dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplCloneable.java index 8a669e77598..7d95660954d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool2dImplCloneable.java @@ -33,6 +33,6 @@ public class LPPool2dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool3dImplCloneable.java index e9c68f80705..39321ae58fb 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LPPool3dImplCloneable.java @@ -33,6 +33,6 @@ public class LPPool3dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplCloneable.java index bd36fcba0d4..7bf3d3cedd9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMCellImplCloneable.java @@ -33,6 +33,6 @@ public class LSTMCellImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplCloneable.java index 095b8b5a2a0..c11949dfb58 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LSTMImplCloneable.java @@ -33,6 +33,6 @@ public class LSTMImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImplCloneable.java index 957be713423..bb2a51fc2df 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LayerNormImplCloneable.java @@ -33,6 +33,6 @@ public class LayerNormImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImplCloneable.java index 674c19f672f..ab095de1b4c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LeakyReLUImplCloneable.java @@ -33,6 +33,6 @@ public class LeakyReLUImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LiftedIValueArg.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LiftedIValueArg.java new file mode 100644 index 00000000000..837e1c69214 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LiftedIValueArg.java @@ -0,0 +1,34 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.javacpp.chrono.*; +import static org.bytedeco.javacpp.global.chrono.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("torch::dynamo::autograd") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class LiftedIValueArg extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public LiftedIValueArg(Pointer p) { super(p); } + + + public LiftedIValueArg(@Const IValue ptr) { super((Pointer)null); allocate(ptr); } + private native void allocate(@Const IValue ptr); + + public native @Const IValue actual_ptr(); public native LiftedIValueArg actual_ptr(IValue setter); // lifetime handled by autograd node + public native @ByRef IValue proxy(); public native LiftedIValueArg proxy(IValue setter); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LiftedIValueArgs.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LiftedIValueArgs.java new file mode 100644 index 00000000000..78cd58c7128 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LiftedIValueArgs.java @@ -0,0 +1,44 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.javacpp.chrono.*; +import static org.bytedeco.javacpp.global.chrono.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("torch::dynamo::autograd") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class LiftedIValueArgs extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public LiftedIValueArgs() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public LiftedIValueArgs(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public LiftedIValueArgs(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public LiftedIValueArgs position(long position) { + return (LiftedIValueArgs)super.position(position); + } + @Override public LiftedIValueArgs getPointer(long i) { + return new LiftedIValueArgs((Pointer)this).offsetAddress(i); + } + + public native @ByRef IValue next_proxy(@Const IValue actual_ptr); + + public native @StdVector LiftedIValueArg args(); public native LiftedIValueArgs args(LiftedIValueArg setter); + public native @Cast("size_t") long next(); public native LiftedIValueArgs next(long setter); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImplCloneable.java index f8b44790cdf..70cbc341a3c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LinearImplCloneable.java @@ -33,6 +33,6 @@ public class LinearImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImplCloneable.java index 9cce546e16c..f16096c5adc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LocalResponseNormImplCloneable.java @@ -33,6 +33,6 @@ public class LocalResponseNormImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImplCloneable.java index 654cfcfa70f..5ee905e2715 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSigmoidImplCloneable.java @@ -33,6 +33,6 @@ public class LogSigmoidImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImplCloneable.java index f726fa98aad..7ce28879394 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LogSoftmaxImplCloneable.java @@ -33,6 +33,6 @@ public class LogSoftmaxImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/LongVaryingShape.java b/pytorch/src/gen/java/org/bytedeco/pytorch/LongVaryingShape.java index 06d4cdb1fb3..1d63ad6b939 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/LongVaryingShape.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/LongVaryingShape.java @@ -33,8 +33,8 @@ public class LongVaryingShape extends Pointer { public LongVaryingShape(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... vec) { super((Pointer)null); allocate(vec); } private native void allocate(@ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... vec); - public LongVaryingShape(@ByVal(nullValue = "std::optional(c10::nullopt)") SizeTOptional size) { super((Pointer)null); allocate(size); } - private native void allocate(@ByVal(nullValue = "std::optional(c10::nullopt)") SizeTOptional size); + public LongVaryingShape(@ByVal(nullValue = "std::optional(std::nullopt)") SizeTOptional size) { super((Pointer)null); allocate(size); } + private native void allocate(@ByVal(nullValue = "std::optional(std::nullopt)") SizeTOptional size); public LongVaryingShape() { super((Pointer)null); allocate(); } private native void allocate(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTBatchDataset.java index 8b2d69f03c4..a02d4cbf109 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTBatchDataset.java @@ -33,7 +33,8 @@ public class MNISTBatchDataset extends Pointer { public native @ByVal ExampleVector get_batch(@ByVal SizeTArrayRef request); public native @ByVal ExampleVector get_batch(@ByVal @Cast({"size_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("size_t") long... request); - /** Returns the size of the dataset, or an empty optional if it is unsized. */ + /** Returns the size of the dataset, or an empty std::optional if it is + * unsized. */ public native @ByVal SizeTOptional size(); /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. 
*/ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapBatchDataset.java index e74b0c5bf73..c200af99268 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MNISTMapBatchDataset.java @@ -31,7 +31,8 @@ public class MNISTMapBatchDataset extends Pointer { public native @ByVal ExampleVector get_batch(@ByVal SizeTArrayRef request); public native @ByVal ExampleVector get_batch(@ByVal @Cast({"size_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("size_t") long... request); - /** Returns the size of the dataset, or an empty optional if it is unsized. */ + /** Returns the size of the dataset, or an empty std::optional if it is + * unsized. */ public native @ByVal SizeTOptional size(); /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MPSHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MPSHooksInterface.java index 70ef363762f..69e63e118be 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MPSHooksInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MPSHooksInterface.java @@ -56,6 +56,7 @@ public class MPSHooksInterface extends AcceleratorHooksInterface { public native void emptyCache(); public native @Cast("size_t") long getCurrentAllocatedMemory(); public native @Cast("size_t") long getDriverAllocatedMemory(); + public native @Cast("size_t") long getRecommendedMaxMemory(); public native void setMemoryFraction(double arg0); public native void profilerStartTrace(@StdString BytePointer mode, @Cast("bool") boolean waitUntilCompleted); public native void profilerStartTrace(@StdString String mode, @Cast("bool") boolean waitUntilCompleted); @@ -68,5 +69,7 @@ public class MPSHooksInterface extends AcceleratorHooksInterface { public native @Cast("bool") boolean queryEvent(@Cast("uint32_t") int event_id); public native double elapsedTimeOfEvents(@Cast("uint32_t") int start_event_id, @Cast("uint32_t") int end_event_id); public native @Cast("bool") boolean hasPrimaryContext(@Cast("c10::DeviceIndex") byte device_index); + public native @Cast("bool") boolean isPinnedPtr(@Const Pointer data); + public native Allocator getPinnedMemoryAllocator(); // #undef FAIL_MPSHOOKS_FUNC } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImplCloneable.java index c6ab7a9b589..2b820371d72 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MSELossImplCloneable.java @@ -33,6 +33,6 @@ public class MSELossImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MTIAHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MTIAHooksInterface.java index b662bc88a4b..127e8dab16c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MTIAHooksInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MTIAHooksInterface.java @@ -67,4 +67,10 @@ public class MTIAHooksInterface extends AcceleratorHooksInterface { public native @ByVal Stream getDefaultStream(@Cast("c10::DeviceIndex") byte device); public native void setCurrentStream(@Const @ByRef Stream stream); + + public native @Cast("bool") boolean isPinnedPtr(@Const Pointer data); + + public native Allocator getPinnedMemoryAllocator(); + + public native @Cast("PyObject*") Pointer memoryStats(@Cast("c10::DeviceIndex") byte device); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImplCloneable.java index b26c597d5d6..fc44db12edc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MarginRankingLossImplCloneable.java @@ -33,6 +33,6 @@ public class MarginRankingLossImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MatchedSchema.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MatchedSchema.java index 8709d2253c2..a8ded7cef83 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MatchedSchema.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MatchedSchema.java @@ -21,7 +21,7 @@ // Try to match a list of inputs and keyword 'attributes' to this // schema. Return the flat list of positional inputs to the call or -// `c10::nullopt` on failure (`failure_messages` contains a good error +// `std::nullopt` on failure (`failure_messages` contains a good error // report in this case) @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplCloneable.java index d59f858dacf..43b9df0a2e8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool1dImplCloneable.java @@ -33,6 +33,6 @@ public class MaxPool1dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplCloneable.java index 42bc8609a1f..b0e5d57c06d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool2dImplCloneable.java @@ -33,6 +33,6 @@ public class MaxPool2dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplCloneable.java index 67133dbf027..485b549940c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxPool3dImplCloneable.java @@ -33,6 +33,6 @@ public class MaxPool3dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImpl.java index 625105e6071..61803c554f3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImpl.java @@ -47,7 +47,7 @@ public class MaxUnpool1dImpl extends MaxUnpool1dImplBase { public native @ByVal Tensor forward( @Const @ByRef Tensor input, @Const @ByRef Tensor indices, - @Const @ByRef(nullValue = "std::optional >(c10::nullopt)") LongVectorOptional output_size); + @Const @ByRef(nullValue = "std::optional >(std::nullopt)") LongVectorOptional output_size); public native @ByVal Tensor forward( @Const @ByRef Tensor input, @Const @ByRef Tensor indices); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplCloneable.java index d6bb267cf64..837388116f1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool1dImplCloneable.java @@ -33,6 +33,6 @@ public class MaxUnpool1dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImpl.java index 6e7bc8c6d60..15132fe6ebd 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImpl.java @@ -47,7 +47,7 @@ public class MaxUnpool2dImpl extends MaxUnpool2dImplBase { public native @ByVal Tensor forward( @Const @ByRef Tensor input, @Const @ByRef Tensor indices, - @Const @ByRef(nullValue = "std::optional >(c10::nullopt)") LongVectorOptional output_size); + @Const @ByRef(nullValue = "std::optional >(std::nullopt)") LongVectorOptional output_size); public native @ByVal Tensor forward( @Const @ByRef Tensor input, @Const @ByRef Tensor indices); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplCloneable.java index 8d334a0831f..bfa0b627e7c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool2dImplCloneable.java @@ -33,6 +33,6 @@ public class MaxUnpool2dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImpl.java index 4aff59aa3d0..0872a61c928 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImpl.java @@ -47,7 +47,7 @@ public class MaxUnpool3dImpl extends MaxUnpool3dImplBase { public native @ByVal Tensor forward( @Const @ByRef Tensor input, @Const @ByRef Tensor indices, - @Const @ByRef(nullValue = "std::optional >(c10::nullopt)") LongVectorOptional output_size); + @Const @ByRef(nullValue = "std::optional >(std::nullopt)") LongVectorOptional output_size); public native @ByVal Tensor forward( @Const @ByRef Tensor input, @Const @ByRef Tensor indices); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplCloneable.java index e78566b64f6..d3b8d4e9f74 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MaxUnpool3dImplCloneable.java @@ -33,6 +33,6 @@ public class MaxUnpool3dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MetaBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MetaBase.java index 95c61374702..d7a8a25c7c7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MetaBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MetaBase.java @@ -55,54 +55,12 @@ public class MetaBase extends Pointer { // Use this function whenever the kernel requires specific strides for the // output. If `strides` does not match the given output strides, proxy outputs // will be created and passed to the IMPL function. - public native void set_output_strided( - @Cast("int64_t") long output_idx, - @ByVal LongArrayRef sizes, - @ByVal LongArrayRef strides, - @ByVal TensorOptions options, - @ByVal(nullValue = "at::DimnameList{}") DimnameArrayRef names); - public native void set_output_strided( - @Cast("int64_t") long output_idx, - @ByVal LongArrayRef sizes, - @ByVal LongArrayRef strides, - @ByVal TensorOptions options); - public native void set_output_strided( - @Cast("int64_t") long output_idx, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides, - @ByVal TensorOptions options, - @ByVal(nullValue = "at::DimnameList{}") DimnameVector names); - public native void set_output_strided( - @Cast("int64_t") long output_idx, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides, - @ByVal TensorOptions options); + // Use this function whenever the kernel knows how to handle arbitrary strided // outputs. This function has the same behavior as the old `set_output`: it // will only re-stride if the given output was resized. - public native void set_output_raw_strided( - @Cast("int64_t") long output_idx, - @ByVal LongArrayRef sizes, - @ByVal LongArrayRef strides_hint, - @ByVal TensorOptions options, - @ByVal(nullValue = "at::DimnameList{}") DimnameArrayRef names); - public native void set_output_raw_strided( - @Cast("int64_t") long output_idx, - @ByVal LongArrayRef sizes, - @ByVal LongArrayRef strides_hint, - @ByVal TensorOptions options); - public native void set_output_raw_strided( - @Cast("int64_t") long output_idx, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides_hint, - @ByVal TensorOptions options, - @ByVal(nullValue = "at::DimnameList{}") DimnameVector names); - public native void set_output_raw_strided( - @Cast("int64_t") long output_idx, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes, - @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides_hint, - @ByVal TensorOptions options); + // Use this function if the kernel requires contiguous strides. // Alias for `set_output_strided`, but with contiguous strides. 
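The dominant change in the hunks above and below is mechanical: every generated default of the form "std::optional(c10::nullopt)" becomes "std::optional(std::nullopt)", following PyTorch 2.5.0's move from c10::optional to std::optional. The Java-side API is unaffected, since JavaCPP only embeds that expression as the C++ default used when the argument is omitted, so both the DeviceOptional overload and the no-argument overload of clone() keep working as before. A minimal usage sketch against these bindings follows; the constructor shapes (LinearImpl(long, long), DeviceOptional(Device), Device(String)) follow the usual presets pattern and are assumptions for illustration, not taken from this patch:

    import org.bytedeco.pytorch.*;
    import org.bytedeco.pytorch.Module;

    public class CloneSketch {
        public static void main(String[] args) {
            LinearImpl linear = new LinearImpl(4, 2);

            // No-argument overload: the native side fills in the
            // std::nullopt default, so the clone stays on the same device.
            Module copy = linear.clone();

            // Explicit overload: wrap a device in a DeviceOptional to
            // place the cloned parameters and buffers on that device.
            Module cpuCopy = linear.clone(new DeviceOptional(new Device("cpu")));
        }
    }

Either call returns a deep copy whose parameters, buffers, and submodules are distinct from the original, as the repeated clone() javadoc in these hunks states.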
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MishImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MishImplCloneable.java index 6f1070846b0..420d5fcaca2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MishImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MishImplCloneable.java @@ -33,6 +33,6 @@ public class MishImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java index f9d3898c5e2..91f47d5f86f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Module.java @@ -116,7 +116,7 @@ public class Module extends Pointer { /// public native @SharedPtr("torch::nn::Module") @ByVal @Virtual(subclasses=false, method="clone") @Cast({"", "std::shared_ptr"}) @Const({false, false, true}) Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); /** Applies the {@code function} to the {@code Module} and recursively to every submodule. * The function must accept a {@code Module&}. diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImpl.java index 3754010b657..55db596d6a2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImpl.java @@ -132,7 +132,7 @@ public ModuleDictImpl( /** Special cloning function for {@code ModuleDict} because it does not use * {@code reset()}. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); /** {@code reset()} is empty for {@code ModuleDict}, since it does not have parameters of diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImplCloneable.java index b88e191a895..34363741bcc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleDictImplCloneable.java @@ -40,6 +40,6 @@ public class ModuleDictImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImpl.java index d954a5fdc89..df8cd680e1d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImpl.java @@ -78,7 +78,7 @@ public class ModuleListImpl extends ModuleListImplCloneable { /** Special cloning function for {@code ModuleList} because it does not use * {@code reset()}. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); /** {@code reset()} is empty for {@code ModuleList}, since it does not have parameters of diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImplCloneable.java index ea458ccd03d..8848c27ace5 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ModuleListImplCloneable.java @@ -33,6 +33,6 @@ public class ModuleListImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImplCloneable.java index a7d869b062b..0ffa82e5ada 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelMarginLossImplCloneable.java @@ -33,6 +33,6 @@ public class MultiLabelMarginLossImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImplCloneable.java index 5be143e85fc..cc4aca73b52 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiLabelSoftMarginLossImplCloneable.java @@ -33,6 +33,6 @@ public class MultiLabelSoftMarginLossImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImplCloneable.java index fdcf9100d01..fd28779ff7c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiMarginLossImplCloneable.java @@ -33,6 +33,6 @@ public class MultiMarginLossImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImplCloneable.java index 40c6e621608..dae151e342a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/MultiheadAttentionImplCloneable.java @@ -33,6 +33,6 @@ public class MultiheadAttentionImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImplCloneable.java index af3a5bbef63..ed902709516 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NLLLossImplCloneable.java @@ -33,6 +33,6 @@ public class NLLLossImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMeta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMeta.java index 7415f637d88..d4a94805f27 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMeta.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/NamedTensorMeta.java @@ -28,7 +28,7 @@ // actually exists outside of c10 and needs to be moved in. // TensorImpl has a unique_ptr field. -// XXX: Ideally we would just put optional> into TensorImpl. +// XXX: Ideally we would just put std::optional> into TensorImpl. 
// // This class has an important invariant: there must be at least ONE // non-wildcard diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Obj.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Obj.java index d94099887f1..42e4a2e27ab 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Obj.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Obj.java @@ -101,12 +101,12 @@ public class Obj extends Pointer { public native @IntrusivePtr("c10::ivalue::Object") @Cast({"", "c10::intrusive_ptr&"}) Obj copy(); public native @IntrusivePtr("c10::ivalue::Object") @Cast({"", "c10::intrusive_ptr&"}) Obj deepcopy( - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @IntrusivePtr("c10::ivalue::Object") @Cast({"", "c10::intrusive_ptr&"}) Obj deepcopy(); public native @IntrusivePtr("c10::ivalue::Object") @Cast({"", "c10::intrusive_ptr&"}) Obj deepcopy( @ByRef HashIdentityIValueMap memo, - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @IntrusivePtr("c10::ivalue::Object") @Cast({"", "c10::intrusive_ptr&"}) Obj deepcopy( @ByRef HashIdentityIValueMap memo); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroup.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroup.java index fc5c0095262..3c8a4f17d69 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroup.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptimizerParamGroup.java @@ -40,6 +40,7 @@ private native void allocate( @ByVal TensorVector params, @UniquePtr @ByVal OptimizerOptions options); + public native @Cast("bool") boolean has_options(); public native @ByRef OptimizerOptions options(); public native void set_options(@UniquePtr @ByVal OptimizerOptions options); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalDeviceGuard.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalDeviceGuard.java index 8c2c2a45155..c6ed0bdd840 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalDeviceGuard.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalDeviceGuard.java @@ -22,8 +22,8 @@ /** * A OptionalDeviceGuard is an RAII class that sets a device to some value on * initialization, and resets the device to its original value on destruction. - * Morally, a OptionalDeviceGuard is equivalent to optional, but - * with extra constructors and methods as appropriate. + * Morally, a OptionalDeviceGuard is equivalent to std::optional, + * but with extra constructors and methods as appropriate. * * Besides its obvious use (optionally applying a DeviceGuard), * OptionalDeviceGuard is often also used for the following idiom: @@ -41,12 +41,12 @@ * when you use the nullary constructor, or pass a nullopt to the constructor. * Uninitialized OptionalDeviceGuards do *nothing*; they do not know what the * original device was and they do not reset on destruction. This is why - * original_device() and current_device() return optional rather than - * Device (as they do in DeviceGuard), and also is why we didn't just + * original_device() and current_device() return std::optional rather + * than Device (as they do in DeviceGuard), and also is why we didn't just * provide OptionalDeviceGuard by default and hide DeviceGuard from users. * * The semantics of an OptionalDeviceGuard are exactly explained by thinking - * of it as an optional. 
In particular, an initialized + * of it as an std::optional. In particular, an initialized * OptionalDeviceGuard doesn't restore device to its value at construction; it * restores device to its value *at initialization*. So if you have the * program: diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalType.java index b5778f168a7..50596961803 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/OptionalType.java @@ -20,7 +20,7 @@ // This type represents an optional type. There is one `Optional` for // each element type. `Optional[T]` can accept both `T` and -// `None`(`c10::nullopt` in C++) +// `None`(`std::nullopt` in C++) // Subtype hierarchy for Optional: // - Optional[T] <: Optional[R] iff T <: R // - T <: Optional[R] if T <: R diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImplCloneable.java index 413fc341488..9c0e6e27214 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PReLUImplCloneable.java @@ -33,6 +33,6 @@ public class PReLUImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImplCloneable.java index 3d4e899ffd4..d0cc85e7200 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PairwiseDistanceImplCloneable.java @@ -33,6 +33,6 @@ public class PairwiseDistanceImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImplCloneable.java index 12ef18c59c1..80dab117a88 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterDictImplCloneable.java @@ -33,6 +33,6 @@ public class ParameterDictImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImplCloneable.java index 736b35f3946..6e0eba41812 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ParameterListImplCloneable.java @@ -33,6 +33,6 @@ public class ParameterListImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Pickler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Pickler.java index cada66973ad..908bf1e7e05 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Pickler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Pickler.java @@ -53,7 +53,6 @@ private native void allocate( TensorVector tensor_table, @ByVal TypeRenamer type_renamer, SharedClassTypeVector memoized_class_types); - // NOLINTNEXTLINE(bugprone-exception-escape) // Push protocol onto the stack public native void protocol(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImplCloneable.java index 845136d230d..88d035a0849 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelShuffleImplCloneable.java @@ -33,6 +33,6 @@ public class PixelShuffleImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImplCloneable.java index 96468042436..c2bd16bea44 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PixelUnshuffleImplCloneable.java @@ -33,6 +33,6 @@ public class PixelUnshuffleImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImplCloneable.java index 15e8c4e0c8c..527c3d6bd6b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PoissonNLLLossImplCloneable.java @@ -33,6 +33,6 @@ public class PoissonNLLLossImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PrivateUse1HooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PrivateUse1HooksInterface.java index 1ddeed881cd..6164081d41d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PrivateUse1HooksInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PrivateUse1HooksInterface.java @@ -42,10 +42,14 @@ public class PrivateUse1HooksInterface extends AcceleratorHooksInterface { public native @ByVal Device getDeviceFromPtr(Pointer data); + public native @Cast("bool") boolean isPinnedPtr(@Const Pointer data); + public native Allocator getPinnedMemoryAllocator(); public native @Cast("bool") boolean hasPrimaryContext(@Cast("c10::DeviceIndex") byte device_index); public native void initPrivateUse1(); - public native void resizePrivateUse1Bytes(@Const @ByRef Storage storage, @Cast("size_t") long newsize); + public native void resizePrivateUse1Bytes( + @Const @ByRef Storage storage, + @Cast("size_t") long newsize); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PyInterpreterVTable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PyInterpreterVTable.java index 5c679ffc74b..65b995571d4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PyInterpreterVTable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PyInterpreterVTable.java @@ -117,6 +117,8 @@ public class PyInterpreterVTable extends Pointer { // Report the name of this interpreter public native @StdString BytePointer name(); + // Run Py_INCREF on a PyObject. + public native void incref(@Cast("PyObject*") Pointer pyobj); // Run Py_DECREF on a PyObject. 
We DO NOT assume the GIL is held on call // See NOTE [PyInterpreter::decref takes a `has_pyobj_slot` arg] public native void decref(@Cast("PyObject*") Pointer pyobj, @Cast("bool") boolean has_pyobj_slot); @@ -142,13 +144,15 @@ public native void python_op_registration_trampoline( DispatchKey arg1, @ByVal DispatchKeySet keyset, IValueVector stack, - @Cast("bool") boolean with_keyset); + @Cast("bool") boolean with_keyset, + @Cast("bool") boolean with_op); public native void python_op_registration_trampoline( @Const @ByRef OperatorHandle op, @Cast("c10::DispatchKey") short arg1, @ByVal DispatchKeySet keyset, IValueVector stack, - @Cast("bool") boolean with_keyset); + @Cast("bool") boolean with_keyset, + @Cast("bool") boolean with_op); public native void throw_abstract_impl_not_imported_error( @StdString BytePointer opname, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectHolder.java b/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectHolder.java index d3a22121bd7..740ba0912c7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectHolder.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/PyObjectHolder.java @@ -30,7 +30,7 @@ public class PyObjectHolder extends Pointer { public native @Cast("PyObject*") Pointer getPyObject(); public native @ByVal InferredType tryToInferType(); - public native @ByVal IValue toIValue(@Const @ByRef Type.TypePtr type, @ByVal(nullValue = "std::optional(c10::nullopt)") IntOptional N); + public native @ByVal IValue toIValue(@Const @ByRef Type.TypePtr type, @ByVal(nullValue = "std::optional(std::nullopt)") IntOptional N); public native @ByVal IValue toIValue(@Const @ByRef Type.TypePtr type); public native @StdString BytePointer toStr(); public native @ByVal TensorVector extractTensors(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplCloneable.java index a58e1c1f3e9..e639f0998ed 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNCellImplCloneable.java @@ -33,6 +33,6 @@ public class RNNCellImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplCloneable.java index c017a7ddf13..887984d2d8b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RNNImplCloneable.java @@ -33,6 +33,6 @@ public class RNNImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImplCloneable.java index d161895294d..9bf62ae6e35 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RReLUImplCloneable.java @@ -33,6 +33,6 @@ public class RReLUImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RandomSampler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RandomSampler.java index f0c11a6e65a..9824b0e7a2d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RandomSampler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RandomSampler.java @@ -37,7 +37,7 @@ public class RandomSampler extends Sampler { private native void allocate(@Cast("int64_t") long size); /** Resets the {@code RandomSampler} to a new set of indices. */ - public native void reset(@ByVal(nullValue = "std::optional(c10::nullopt)") SizeTOptional new_size); + public native void reset(@ByVal(nullValue = "std::optional(std::nullopt)") SizeTOptional new_size); public native void reset(); /** Returns the next batch of indices. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/RangeValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/RangeValue.java index 5e25ad746b2..425de827e5b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/RangeValue.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/RangeValue.java @@ -30,12 +30,12 @@ public RangeValue( @Const @ByRef SourceRange loc, @ByRef GraphFunction m, @ByVal ValueVector input, - @ByVal(nullValue = "std::optional(c10::nullopt)") LongOptional static_len) { super((Pointer)null); allocate(loc, m, input, static_len); } + @ByVal(nullValue = "std::optional(std::nullopt)") LongOptional static_len) { super((Pointer)null); allocate(loc, m, input, static_len); } private native void allocate( @Const @ByRef SourceRange loc, @ByRef GraphFunction m, @ByVal ValueVector input, - @ByVal(nullValue = "std::optional(c10::nullopt)") LongOptional static_len); + @ByVal(nullValue = "std::optional(std::nullopt)") LongOptional static_len); public RangeValue( @Const @ByRef SourceRange loc, @ByRef GraphFunction m, diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6ImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6ImplCloneable.java index ba3a217f6c7..9806723c82b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6ImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLU6ImplCloneable.java @@ -33,6 +33,6 @@ public class ReLU6ImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
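This clone(DeviceOptional) overload recurs verbatim across all of the *ImplCloneable wrappers in this patch; only the default-value string moves from c10::nullopt to std::nullopt, so existing Java call sites keep compiling. A minimal usage sketch, assuming the usual org.bytedeco.pytorch imports and the standard JavaCPP optional constructor DeviceOptional(Device) (the ReLU6 module is only an example):

    ReLU6Impl relu6 = new ReLU6Impl();
    Module sameDevice = relu6.clone();                                 // device defaults to std::nullopt
    Module onCpu = relu6.clone(new DeviceOptional(new Device("cpu"))); // deep copy placed on the CPU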
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImplCloneable.java index f497b150897..d0e8e2d6bb1 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReLUImplCloneable.java @@ -33,6 +33,6 @@ public class ReLUImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplCloneable.java index d3404b69dc1..dc1627a0e12 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad1dImplCloneable.java @@ -33,6 +33,6 @@ public class ReflectionPad1dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplCloneable.java index 68c7ea4604c..b96cbbc9e94 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad2dImplCloneable.java @@ -33,6 +33,6 @@ public class ReflectionPad2dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplCloneable.java index f98ba50e909..517c621cb57 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReflectionPad3dImplCloneable.java @@ -33,6 +33,6 @@ public class ReflectionPad3dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplCloneable.java index 3114010887c..535c901d30c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad1dImplCloneable.java @@ -33,6 +33,6 @@ public class ReplicationPad1dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplCloneable.java index 9adfa826174..33cf2b99920 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad2dImplCloneable.java @@ -33,6 +33,6 @@ public class ReplicationPad2dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplCloneable.java index a98a2adecd0..bf134cb9bc7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ReplicationPad3dImplCloneable.java @@ -33,6 +33,6 @@ public class ReplicationPad3dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImplCloneable.java index 09427906ab1..5545075376a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SELUImplCloneable.java @@ -33,6 +33,6 @@ public class SELUImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyObject.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyObject.java index 9e7d278fede..581c71f1c86 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyObject.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SafePyObject.java @@ -42,12 +42,11 @@ public class SafePyObject extends Pointer { @SharedPtr @Name("std::make_shared") private native void allocate(@Cast("PyObject*") Pointer data, PyInterpreter pyinterpreter); public SafePyObject(@ByRef(true) SafePyObject other) { super((Pointer)null); allocate(other); } @NoException(true) @SharedPtr @Name("std::make_shared") private native void allocate(@ByRef(true) SafePyObject other); - - // In principle this could be copyable if we add an incref to PyInterpreter - // but for now it's easier to just disallow it. - + // For now it's not used, so we just disallow it. + public native @ByRef @Name("operator =") SafePyObject put(@Const @ByRef SafePyObject other); + public native @ByRef PyInterpreter pyinterpreter(); public native @Cast("PyObject*") Pointer ptr(@Const PyInterpreter arg0); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedTensorDefaultHooks.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedTensorDefaultHooks.java index b7b9f8d1b0b..b9d348c8e77 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedTensorDefaultHooks.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedTensorDefaultHooks.java @@ -37,12 +37,12 @@ public class SavedTensorDefaultHooks extends Pointer { return new SavedTensorDefaultHooks((Pointer)this).offsetAddress(i); } - public static native void push_hooks(@Cast("PyObject*") Pointer pack_hook, @Cast("PyObject*") Pointer unpack_hook); - public static native void pop_hooks(); - public static native @ByVal @Cast("std::pair*") Pointer get_hooks(); + public static native void push_hooks( + @ByVal SafePyObject pack_hook, + @ByVal SafePyObject unpack_hook); + public static native @ByVal @Cast("std::pair*") Pointer pop_hooks(); + public static native @Cast("std::pair*") @Optional Pointer get_hooks(); public static native void lazy_initialize(); - public static native @ByVal @Cast("std::stack >*") Pointer get_stack(); - public static native void set_stack(@ByVal @Cast("std::stack >*") Pointer arg0); public static native @Const @ByRef SavedTensorDefaultHooksTLS get_tls_state(); public static native void set_tls_state(@Const @ByRef SavedTensorDefaultHooksTLS tls); @@ -52,10 +52,19 @@ public class SavedTensorDefaultHooks extends Pointer { // hooks, especially if their feature does not work with it. 
If they are // disabled, then the following will raise an error: // - Attempting to push_hooks - // - calling disable(message) with a non-zero stack (from get_stack) size + // - calling disable(message) with a non-zero stack (hooks) size public static native void disable(@StdString BytePointer error_message); public static native void disable(@StdString String error_message); public static native void enable(); public static native @Cast("bool") boolean is_enabled(); public static native @Const @ByRef StringOptional get_disabled_error_message(); + + // NOTE: [Deferring tensor pack/unpack hooks until runtime] + // To preserve eager semantics of pack/unpack hooks firing only once per saved + // variable, Dynamo/AOTAutograd need to defer hook firing until runtime. Using + // disable() would loudly error at trace time, and pushing a no-op hook would + // fail when the traced code is wrapped in a disable_saved_tensors_hooks ctx. + // To do so, we disable these hooks during tracing. See + // https://github.com/pytorch/pytorch/issues/113263. + public static native @Cast("bool") boolean set_tracing(@Cast("bool") boolean is_tracing); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedTensorDefaultHooksTLS.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedTensorDefaultHooksTLS.java index b95d1ed973c..24bda97ddbe 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SavedTensorDefaultHooksTLS.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SavedTensorDefaultHooksTLS.java @@ -38,7 +38,7 @@ public class SavedTensorDefaultHooksTLS extends Pointer { } // PyObject is defined in c10/util/python_stub.h - public native @ByRef @Cast("std::stack >*") Pointer stack(); public native SavedTensorDefaultHooksTLS stack(Pointer setter); + public native @ByRef @Cast("std::stack >*") Pointer stack(); public native SavedTensorDefaultHooksTLS stack(Pointer setter); // See NOTE: [Disabling SavedTensorDefaultHooks] for context // NOTE: [disabled_error_message invariant] @@ -46,4 +46,7 @@ public class SavedTensorDefaultHooksTLS extends Pointer { // We did this for efficiency (so we didn't have to keep a separate bool // around) public native @ByRef StringOptional disabled_error_message(); public native SavedTensorDefaultHooksTLS disabled_error_message(StringOptional setter); + + // See NOTE: [Deferring tensor pack/unpack hooks until runtime] + public native @Cast("bool") boolean is_tracing(); public native SavedTensorDefaultHooksTLS is_tracing(boolean setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Scalar.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Scalar.java index 7b6221bf1c9..26f0df279b7 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Scalar.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Scalar.java @@ -87,6 +87,8 @@ public class Scalar extends Pointer { // int64_t // #if defined(__APPLE__) || defined(__MACOSX) // #endif +// #if defined(_MSC_VER) +// #endif // #if defined(__linux__) && !defined(__ANDROID__) // #endif diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImpl.java index 536d39cb974..a90815f7955 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImpl.java @@ -112,7 +112,7 @@ public SequentialImpl( /** Special cloning function for {@code Sequential} because it does not use * {@code reset()}.
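The SavedTensorDefaultHooks rework above drops the raw PyObject stack accessors in favor of SafePyObject-based push_hooks/pop_hooks and adds set_tracing for the deferred-hooks NOTE. A hedged sketch of the save/restore pattern that NOTE implies, assuming set_tracing returns the previous tracing flag:

    // Defer pack/unpack hooks while tracing, then restore the prior state.
    boolean wasTracing = SavedTensorDefaultHooks.set_tracing(true);
    try {
        // ... trace the region whose saved tensors should not fire hooks ...
    } finally {
        SavedTensorDefaultHooks.set_tracing(wasTracing);
    }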
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); /** {@code reset()} is empty for {@code Sequential}, since it does not have parameters of diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImplCloneable.java index ea1fd2e8c4a..a9ab7ad321d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialImplCloneable.java @@ -33,6 +33,6 @@ public class SequentialImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialSampler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialSampler.java index 5f025a4ccdf..fe1b7d37e55 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialSampler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SequentialSampler.java @@ -32,7 +32,7 @@ public class SequentialSampler extends Sampler { private native void allocate(@Cast("size_t") long size); /** Resets the {@code SequentialSampler} to zero. */ - public native void reset(@ByVal(nullValue = "std::optional(c10::nullopt)") SizeTOptional new_size); + public native void reset(@ByVal(nullValue = "std::optional(std::nullopt)") SizeTOptional new_size); public native void reset(); /** Returns the next batch of indices. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImplCloneable.java index 913af82d69d..b772a1c696c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SiLUImplCloneable.java @@ -33,6 +33,6 @@ public class SiLUImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImplCloneable.java index 737a22793c0..96009cbb89c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SigmoidImplCloneable.java @@ -33,6 +33,6 @@ public class SigmoidImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Slice.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Slice.java index 2f3d17dd305..4537448d7ee 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Slice.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Slice.java @@ -35,13 +35,13 @@ public class Slice extends Pointer { } public Slice( - @ByVal(nullValue = "std::optional(c10::nullopt)") SymIntOptional start_index, - @ByVal(nullValue = "std::optional(c10::nullopt)") SymIntOptional stop_index, - @ByVal(nullValue = "std::optional(c10::nullopt)") SymIntOptional step_index) { super((Pointer)null); allocate(start_index, stop_index, step_index); } + @ByVal(nullValue = "std::optional(std::nullopt)") SymIntOptional start_index, + @ByVal(nullValue = "std::optional(std::nullopt)") SymIntOptional stop_index, + @ByVal(nullValue = "std::optional(std::nullopt)") SymIntOptional step_index) { super((Pointer)null); allocate(start_index, stop_index, step_index); } private native void allocate( - @ByVal(nullValue = "std::optional(c10::nullopt)") SymIntOptional start_index, - @ByVal(nullValue = "std::optional(c10::nullopt)") SymIntOptional stop_index, - @ByVal(nullValue = "std::optional(c10::nullopt)") SymIntOptional step_index); + @ByVal(nullValue = "std::optional(std::nullopt)") SymIntOptional start_index, + @ByVal(nullValue = "std::optional(std::nullopt)") SymIntOptional stop_index, + @ByVal(nullValue = "std::optional(std::nullopt)") SymIntOptional step_index); public Slice() { super((Pointer)null); allocate(); } private native void allocate(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImplCloneable.java index c5cdb423dcd..18f97a2fa90 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SmoothL1LossImplCloneable.java @@ -33,6 +33,6 @@ public class SmoothL1LossImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImplCloneable.java index 4448b8bef90..c853feb8d1c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftMarginLossImplCloneable.java @@ -33,6 +33,6 @@ public class SoftMarginLossImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImplCloneable.java index 66c8fda041d..c19b88d90ef 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Softmax2dImplCloneable.java @@ -33,6 +33,6 @@ public class Softmax2dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImplCloneable.java index 1cd32a3be62..c0621ce906d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftmaxImplCloneable.java @@ -33,6 +33,6 @@ public class SoftmaxImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImplCloneable.java index edc274ff3b7..b591fa897ac 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftminImplCloneable.java @@ -33,6 +33,6 @@ public class SoftminImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImplCloneable.java index 75bed7ee51f..f659c5c0239 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftplusImplCloneable.java @@ -33,6 +33,6 @@ public class SoftplusImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImplCloneable.java index bdd2f5e5af2..1dbc06969bc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftshrinkImplCloneable.java @@ -33,6 +33,6 @@ public class SoftshrinkImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImplCloneable.java index 71e223419ef..da31f8a94d0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SoftsignImplCloneable.java @@ -33,6 +33,6 @@ public class SoftsignImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Source.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Source.java index fe1c73ab39a..85d1a3b5f6b 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Source.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Source.java @@ -43,13 +43,13 @@ public enum CopiesString { COPIES_STRING(0), DONT_COPY(1); public Source( @StringView BytePointer text_view, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringOptional filename, + @ByVal(nullValue = "std::optional(std::nullopt)") StringOptional filename, @Cast("size_t") long starting_line_no/*=0*/, @SharedPtr SourceRangeUnpickler gen_ranges/*=nullptr*/, CopiesString copies_str/*=torch::jit::Source::COPIES_STRING*/) { super((Pointer)null); allocate(text_view, filename, starting_line_no, gen_ranges, copies_str); } private native void allocate( @StringView BytePointer text_view, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringOptional filename, + @ByVal(nullValue = "std::optional(std::nullopt)") StringOptional filename, @Cast("size_t") long starting_line_no/*=0*/, @SharedPtr SourceRangeUnpickler gen_ranges/*=nullptr*/, CopiesString copies_str/*=torch::jit::Source::COPIES_STRING*/); @@ -59,13 +59,13 @@ private native void allocate( @StringView BytePointer text_view); public Source( @StringView String text_view, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringOptional filename, + @ByVal(nullValue = "std::optional(std::nullopt)") StringOptional filename, @Cast("size_t") long starting_line_no/*=0*/, @SharedPtr SourceRangeUnpickler gen_ranges/*=nullptr*/, 
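The reset(SizeTOptional) signature shown here for StreamSampler matches the RandomSampler and SequentialSampler changes earlier in the patch: only the default-value string changes, not the Java overloads. For illustration, assuming the standard JavaCPP optional constructor SizeTOptional(long) (the sizes are made up):

    RandomSampler sampler = new RandomSampler(100); // permutation over 100 indices
    sampler.reset();                                // re-shuffle, keeping the current size
    sampler.reset(new SizeTOptional(50));           // re-shuffle with a new size of 50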
@Cast("torch::jit::Source::CopiesString") int copies_str/*=torch::jit::Source::COPIES_STRING*/) { super((Pointer)null); allocate(text_view, filename, starting_line_no, gen_ranges, copies_str); } private native void allocate( @StringView String text_view, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringOptional filename, + @ByVal(nullValue = "std::optional(std::nullopt)") StringOptional filename, @Cast("size_t") long starting_line_no/*=0*/, @SharedPtr SourceRangeUnpickler gen_ranges/*=nullptr*/, @Cast("torch::jit::Source::CopiesString") int copies_str/*=torch::jit::Source::COPIES_STRING*/); @@ -76,12 +76,12 @@ private native void allocate( public Source( @ByVal StringCordView str, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringOptional filename, + @ByVal(nullValue = "std::optional(std::nullopt)") StringOptional filename, @Cast("size_t") long starting_line_no/*=0*/, @SharedPtr SourceRangeUnpickler gen_ranges/*=nullptr*/) { super((Pointer)null); allocate(str, filename, starting_line_no, gen_ranges); } private native void allocate( @ByVal StringCordView str, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringOptional filename, + @ByVal(nullValue = "std::optional(std::nullopt)") StringOptional filename, @Cast("size_t") long starting_line_no/*=0*/, @SharedPtr SourceRangeUnpickler gen_ranges/*=nullptr*/); public Source( diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamSampler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamSampler.java index d3a7dd283cd..044eac042fa 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StreamSampler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StreamSampler.java @@ -36,7 +36,7 @@ public class StreamSampler extends BatchSizeSampler { private native void allocate(@Cast("size_t") long epoch_size); /** Resets the internal state of the sampler. 
*/ - public native void reset(@ByVal(nullValue = "std::optional(c10::nullopt)") SizeTOptional new_size); + public native void reset(@ByVal(nullValue = "std::optional(std::nullopt)") SizeTOptional new_size); public native void reset(); /** Returns a {@code BatchSize} object with the number of elements to fetch in the diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVaryingShape.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVaryingShape.java index 49b83e8a1e1..28d31237742 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVaryingShape.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StrideVaryingShape.java @@ -31,8 +31,8 @@ public class StrideVaryingShape extends Pointer { public StrideVaryingShape(@ByVal StrideArrayRef vec) { super((Pointer)null); allocate(vec); } private native void allocate(@ByVal StrideArrayRef vec); - public StrideVaryingShape(@ByVal(nullValue = "std::optional(c10::nullopt)") SizeTOptional size) { super((Pointer)null); allocate(size); } - private native void allocate(@ByVal(nullValue = "std::optional(c10::nullopt)") SizeTOptional size); + public StrideVaryingShape(@ByVal(nullValue = "std::optional(std::nullopt)") SizeTOptional size) { super((Pointer)null); allocate(size); } + private native void allocate(@ByVal(nullValue = "std::optional(std::nullopt)") SizeTOptional size); public StrideVaryingShape() { super((Pointer)null); allocate(); } private native void allocate(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringGenericListDict.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringGenericListDict.java index 60ad540294e..7f081a59110 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/StringGenericListDict.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringGenericListDict.java @@ -147,7 +147,7 @@ public class StringGenericListDict extends Pointer { public native @Cast("bool") boolean is(@Const @ByRef StringGenericListDict rhs); // private API for now because the return type will change to TypePtr - // instead of optional once types are mandatory. + // instead of std::optional once types are mandatory. public native @ByVal Type.TypePtr keyType(); public native @ByVal Type.TypePtr valueType(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringPair.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringPair.java new file mode 100644 index 00000000000..9114dcdb3e3 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringPair.java @@ -0,0 +1,50 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.javacpp.chrono.*; +import static org.bytedeco.javacpp.global.chrono.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@NoOffset @Name("std::pair") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class StringPair extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public StringPair(Pointer p) { super(p); } + public StringPair(BytePointer firstValue, BytePointer secondValue) { this(); put(firstValue, secondValue); } + public StringPair(String firstValue, String secondValue) { this(); put(firstValue, secondValue); } + public StringPair() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef StringPair put(@ByRef StringPair x); + + + @MemberGetter public native @StdString BytePointer first(); public native StringPair first(BytePointer first); + @MemberGetter public native @StdString BytePointer second(); public native StringPair second(BytePointer second); + @MemberSetter @Index public native StringPair first(@StdString String first); + @MemberSetter @Index public native StringPair second(@StdString String second); + + public StringPair put(BytePointer firstValue, BytePointer secondValue) { + first(firstValue); + second(secondValue); + return this; + } + + public StringPair put(String firstValue, String secondValue) { + first(firstValue); + second(secondValue); + return this; + } +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringScalarTypeMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringScalarTypeMap.java new file mode 100644 index 00000000000..af89de09f18 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringScalarTypeMap.java @@ -0,0 +1,49 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.javacpp.chrono.*; +import static org.bytedeco.javacpp.global.chrono.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("std::unordered_map") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class StringScalarTypeMap extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
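The new StringPair class above wraps std::pair&lt;std::string,std::string&gt;; its constructors and the first/second accessors are all shown in the generated body. A short usage sketch using only those members:

    StringPair pair = new StringPair("key", "value");
    System.out.println(pair.first().getString() + " = " + pair.second().getString());
    pair.put("key2", "value2"); // overwrite both members in place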
*/ + public StringScalarTypeMap(Pointer p) { super(p); } + public StringScalarTypeMap() { allocate(); } + private native void allocate(); + public native @Name("operator =") @ByRef StringScalarTypeMap put(@ByRef StringScalarTypeMap x); + + public boolean empty() { return size() == 0; } + public native long size(); + + @Index public native ScalarType get(@StdString BytePointer i); + public native StringScalarTypeMap put(@StdString BytePointer i, ScalarType value); + + public native void erase(@ByVal Iterator pos); + public native @ByVal Iterator begin(); + public native @ByVal Iterator end(); + @NoOffset @Name("iterator") public static class Iterator extends Pointer { + public Iterator(Pointer p) { super(p); } + public Iterator() { } + + public native @Name("operator ++") @ByRef Iterator increment(); + public native @Name("operator ==") boolean equals(@ByRef Iterator it); + public native @Name("operator *().first") @MemberGetter @StdString BytePointer first(); + public native @Name("operator *().second") @MemberGetter ScalarType second(); + } +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/StringViewReader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/StringViewReader.java new file mode 100644 index 00000000000..6e9cd32f0c5 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/StringViewReader.java @@ -0,0 +1,36 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.javacpp.chrono.*; +import static org.bytedeco.javacpp.global.chrono.*; + +import static org.bytedeco.pytorch.global.torch.*; + + +@Namespace("torch::jit") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class StringViewReader extends ReadAdapterInterface { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
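Likewise, the new StringScalarTypeMap wraps an std::unordered_map from std::string to ScalarType, with the get/put/iterator members shown above. A sketch of typical use (ScalarType.Float is the presets' ScalarType enum constant):

    StringScalarTypeMap dtypes = new StringScalarTypeMap();
    dtypes.put(new BytePointer("weights"), ScalarType.Float);
    for (StringScalarTypeMap.Iterator it = dtypes.begin(); !it.equals(dtypes.end()); it = it.increment()) {
        System.out.println(it.first().getString() + " -> " + it.second());
    }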
*/ + public StringViewReader(Pointer p) { super(p); } + + public StringViewReader(@Cast("const char*") BytePointer data) { super((Pointer)null); allocate(data); } + private native void allocate(@Cast("const char*") BytePointer data); + public StringViewReader(String data) { super((Pointer)null); allocate(data); } + private native void allocate(String data); + + public native @Cast("size_t") long size(); + + +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SwapSavedVariables.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SwapSavedVariables.java index edb8a9bffbc..e4a6a6d4e23 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SwapSavedVariables.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SwapSavedVariables.java @@ -31,7 +31,7 @@ public class SwapSavedVariables extends Pointer { public native void before(@ByRef SymInt t); public native void after(@ByRef SymInt t); - public native void before(@ByRef IValue t); + public native void before(@ByRef IValue iv); public native void after(@ByRef IValue t); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymBool.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymBool.java index dc89c77bf9d..71d44c274e9 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymBool.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymBool.java @@ -68,8 +68,7 @@ public class SymBool extends Pointer { public native @Cast("bool") boolean guard_bool(String file, @Cast("int64_t") long line); public native @Cast("bool") boolean expect_true(@Cast("const char*") BytePointer file, @Cast("int64_t") long line); public native @Cast("bool") boolean expect_true(String file, @Cast("int64_t") long line); - public native @Cast("bool") boolean guard_size_oblivious(@Cast("const char*") BytePointer file, @Cast("int64_t") long line); - public native @Cast("bool") boolean guard_size_oblivious(String file, @Cast("int64_t") long line); + public native @Cast("bool") boolean has_hint(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymBoolType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymBoolType.java index a071630b7cb..49a5c59b854 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymBoolType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymBoolType.java @@ -26,7 +26,7 @@ public class SymBoolType extends Type { public native @Cast("bool") boolean equals(@Const @ByRef Type rhs); public native @StdString BytePointer str(); - public native @StdString BytePointer annotation_str_impl(@Const @ByRef(nullValue = "c10::TypePrinter(nullptr)") TypePrinter printer); + public native @StdString BytePointer annotation_str_impl(@Const @ByRef(nullValue = "c10::TypePrinter(maybe_unused]] = nullptr)") TypePrinter printer); public native @StdString BytePointer annotation_str_impl(); @MemberGetter public static native TypeKind Kind(); // global singleton diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloatType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloatType.java index 776edd98717..29259c72355 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloatType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymFloatType.java @@ -26,7 +26,7 @@ public class SymFloatType extends Type { public native @Cast("bool") boolean equals(@Const @ByRef Type rhs); public native @StdString BytePointer str(); - public native @StdString BytePointer annotation_str_impl(@Const @ByRef(nullValue = "c10::TypePrinter(nullptr)") TypePrinter printer); + public native @StdString BytePointer annotation_str_impl(@Const @ByRef(nullValue = 
"c10::TypePrinter(maybe_unused]] = nullptr)") TypePrinter printer); public native @StdString BytePointer annotation_str_impl(); @MemberGetter public static native TypeKind Kind(); // global singleton diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntOptionalVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntOptionalVector.java new file mode 100644 index 00000000000..be6a55f427c --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntOptionalVector.java @@ -0,0 +1,91 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch; + +import org.bytedeco.pytorch.Allocator; +import org.bytedeco.pytorch.Function; +import org.bytedeco.pytorch.Module; +import org.bytedeco.javacpp.annotation.Cast; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.javacpp.chrono.*; +import static org.bytedeco.javacpp.global.chrono.*; + +import static org.bytedeco.pytorch.global.torch.*; + +@Name("std::vector >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) +public class SymIntOptionalVector extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public SymIntOptionalVector(Pointer p) { super(p); } + public SymIntOptionalVector(SymIntOptional value) { this(1); put(0, value); } + public SymIntOptionalVector(SymIntOptional ... array) { this(array.length); put(array); } + public SymIntOptionalVector() { allocate(); } + public SymIntOptionalVector(long n) { allocate(n); } + private native void allocate(); + private native void allocate(@Cast("size_t") long n); + public native @Name("operator =") @ByRef SymIntOptionalVector put(@ByRef SymIntOptionalVector x); + + public boolean empty() { return size() == 0; } + public native long size(); + public void clear() { resize(0); } + public native void resize(@Cast("size_t") long n); + + public SymIntOptional front() { return get(0); } + public SymIntOptional back() { return get(size() - 1); } + @Index(function = "at") public native @ByRef SymIntOptional get(@Cast("size_t") long i); + public native SymIntOptionalVector put(@Cast("size_t") long i, SymIntOptional value); + + public native @ByVal Iterator insert(@ByVal Iterator pos, @ByRef SymIntOptional value); + public native @ByVal Iterator erase(@ByVal Iterator pos); + public native @ByVal Iterator begin(); + public native @ByVal Iterator end(); + @NoOffset @Name("iterator") public static class Iterator extends Pointer { + public Iterator(Pointer p) { super(p); } + public Iterator() { } + + public native @Name("operator ++") @ByRef Iterator increment(); + public native @Name("operator ==") boolean equals(@ByRef Iterator it); + public native @Name("operator *") @ByRef @Const SymIntOptional get(); + } + + public SymIntOptional[] get() { + SymIntOptional[] array = new SymIntOptional[size() < Integer.MAX_VALUE ? 
(int)size() : Integer.MAX_VALUE]; + for (int i = 0; i < array.length; i++) { + array[i] = get(i); + } + return array; + } + @Override public String toString() { + return java.util.Arrays.toString(get()); + } + + public SymIntOptional pop_back() { + long size = size(); + SymIntOptional value = get(size - 1); + resize(size - 1); + return value; + } + public SymIntOptionalVector push_back(SymIntOptional value) { + long size = size(); + resize(size + 1); + return put(size, value); + } + public SymIntOptionalVector put(SymIntOptional value) { + if (size() != 1) { resize(1); } + return put(0, value); + } + public SymIntOptionalVector put(SymIntOptional ... array) { + if (size() != array.length) { resize(array.length); } + for (int i = 0; i < array.length; i++) { + put(i, array[i]); + } + return this; + } +} + diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntType.java index 8a9ab5c729d..c4e3c33ce34 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntType.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymIntType.java @@ -26,7 +26,7 @@ public class SymIntType extends Type { public native @Cast("bool") boolean equals(@Const @ByRef Type rhs); public native @StdString BytePointer str(); - public native @StdString BytePointer annotation_str_impl(@Const @ByRef(nullValue = "c10::TypePrinter(nullptr)") TypePrinter printer); + public native @StdString BytePointer annotation_str_impl(@Const @ByRef(nullValue = "c10::TypePrinter(maybe_unused]] = nullptr)") TypePrinter printer); public native @StdString BytePointer annotation_str_impl(); @MemberGetter public static native TypeKind Kind(); // global singleton diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymNode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymNode.java index 507963bbcec..54ebf440779 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymNode.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/SymNode.java @@ -98,8 +98,7 @@ public class SymNode extends Pointer { public native @Cast("bool") boolean guard_bool(String file, @Cast("int64_t") long line); public native double guard_float(@Cast("const char*") BytePointer file, @Cast("int64_t") long line); public native double guard_float(String file, @Cast("int64_t") long line); - public native @Cast("bool") boolean guard_size_oblivious(@Cast("const char*") BytePointer file, @Cast("int64_t") long line); - public native @Cast("bool") boolean guard_size_oblivious(String file, @Cast("int64_t") long line); + public native @Cast("bool") boolean expect_true(@Cast("const char*") BytePointer file, @Cast("int64_t") long line); public native @Cast("bool") boolean expect_true(String file, @Cast("int64_t") long line); public native @Cast("bool") boolean expect_size(@Cast("const char*") BytePointer file, @Cast("int64_t") long line); @@ -108,6 +107,7 @@ public class SymNode extends Pointer { public native @Cast("bool") boolean bool_(); public native @Cast("bool") boolean has_hint(); public native @StdString BytePointer str(); + public native @StdString BytePointer _graph_repr(); public native @ByVal LongOptional nested_int(); public native @ByVal LongOptional nested_int_coeff(); public native @ByVal LongOptional constant_int(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolicShapeMeta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolicShapeMeta.java index 8bb49867491..e04696c7c50 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolicShapeMeta.java +++ 
b/pytorch/src/gen/java/org/bytedeco/pytorch/SymbolicShapeMeta.java @@ -45,6 +45,8 @@ public class SymbolicShapeMeta extends Pointer { private native void allocate(); public SymbolicShapeMeta(@Const @ByRef SymbolicShapeMeta other) { super((Pointer)null); allocate(other); } private native void allocate(@Const @ByRef SymbolicShapeMeta other); + + public native void refresh_numel(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImplCloneable.java index 804c8aaf7c4..61fe1dab284 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhImplCloneable.java @@ -33,6 +33,6 @@ public class TanhImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImplCloneable.java index 2bfb1edabfa..63f6d518b77 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TanhshrinkImplCloneable.java @@ -33,6 +33,6 @@ public class TanhshrinkImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java index 9f75d08f765..4833170bb3f 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Tensor.java @@ -259,7 +259,7 @@ private native void allocate( * // f requires grad, has no operation creating it * }

- * \fn void backward(const Tensor & gradient={}, std::optional retain_graph=c10::nullopt, bool create_graph=false, std::optional inputs=c10::nullopt) const; + * \fn void backward(const Tensor & gradient={}, std::optional retain_graph=std::nullopt, bool create_graph=false, std::optional inputs=std::nullopt) const; * * Computes the gradient of current tensor with respect to graph leaves. * @@ -299,7 +299,7 @@ private native void allocate( /// /// /// - public native void backward(@Const @ByRef(nullValue = "at::Tensor{}") Tensor gradient, @ByVal(nullValue = "std::optional(c10::nullopt)") BoolOptional retain_graph, @Cast("bool") boolean create_graph/*=false*/, @ByVal(nullValue = "std::optional(c10::nullopt)") TensorArrayRefOptional inputs); + public native void backward(@Const @ByRef(nullValue = "at::Tensor{}") Tensor gradient, @ByVal(nullValue = "std::optional(std::nullopt)") BoolOptional retain_graph, @Cast("bool") boolean create_graph/*=false*/, @ByVal(nullValue = "std::optional(std::nullopt)") TensorArrayRefOptional inputs); public native void backward(); /** \fn Tensor detach() const; diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java index 3256982ec8a..6e0140917b2 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBase.java @@ -93,7 +93,7 @@ public class TensorBase extends AbstractTensor { public native @Const @ByRef TensorBase fill_(@Const @ByRef Scalar scalar); public native @Const @ByRef TensorBase zero_(); - public native @ByVal TensorBase to(@ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @Cast("bool") boolean non_blocking/*=false*/, @Cast("bool") boolean copy/*=false*/, @ByVal(nullValue = "std::optional(c10::nullopt)") MemoryFormatOptional memory_format); + public native @ByVal TensorBase to(@ByVal(nullValue = "at::TensorOptions{}") TensorOptions options, @Cast("bool") boolean non_blocking/*=false*/, @Cast("bool") boolean copy/*=false*/, @ByVal(nullValue = "std::optional(std::nullopt)") MemoryFormatOptional memory_format); public native @ByVal TensorBase to(); public native @Cast("bool") boolean is_complex(); @@ -396,7 +396,7 @@ public class TensorBase extends AbstractTensor { * // f requires grad, has no operation creating it * }
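The backward documentation above changes only its stated defaults from c10::nullopt to std::nullopt; the generated overloads keep the same shape. A hedged call sketch, assuming a scalar tensor named loss built with requires_grad and the usual JavaCPP optional-wrapper constructors:

    loss.backward(); // gradient = {}, retain_graph and inputs default to std::nullopt
    loss.backward(new Tensor(),                  // empty gradient, same as the default
                  new BoolOptional(true),        // retain_graph = true
                  /*create_graph=*/false,
                  new TensorArrayRefOptional()); // inputs = std::nullopt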

- * \fn void backward(const Tensor & gradient={}, std::optional retain_graph=c10::nullopt, bool create_graph=false, std::optional inputs=c10::nullopt) const; + * \fn void backward(const Tensor & gradient={}, std::optional retain_graph=std::nullopt, bool create_graph=false, std::optional inputs=std::nullopt) const; * * Computes the gradient of current tensor with respect to graph leaves. * diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBatchDataset.java index ec88ce05d3a..2d1fcf5e938 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBatchDataset.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorBatchDataset.java @@ -31,7 +31,8 @@ public class TensorBatchDataset extends Pointer { public native @ByVal TensorExampleVector get_batch(@ByVal SizeTArrayRef request); public native @ByVal TensorExampleVector get_batch(@ByVal @Cast({"size_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("size_t") long... request); - /** Returns the size of the dataset, or an empty optional if it is unsized. */ + /** Returns the size of the dataset, or an empty std::optional if it is + * unsized. */ public native @ByVal SizeTOptional size(); /** Creates a {@code MapDataset} that applies the given {@code transform} to this dataset. */ diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImpl.java index 86b53a1f5e7..b8033add7ae 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorImpl.java @@ -702,7 +702,7 @@ public native void release_storage_and_set_meta_custom_data_ptr_error_msg_( public native void set_sizes_and_strides( @ByVal SymIntArrayRef sizes, @ByVal SymIntArrayRef strides, - @ByVal(nullValue = "std::optional(c10::nullopt)") SymIntOptional storage_offset); + @ByVal(nullValue = "std::optional(std::nullopt)") SymIntOptional storage_offset); public native void set_sizes_and_strides( @ByVal SymIntArrayRef sizes, @ByVal SymIntArrayRef strides); @@ -758,14 +758,14 @@ public native void set_sizes_and_strides( public native void set_sizes_and_strides( @ByVal LongArrayRef new_size, @ByVal LongArrayRef new_stride, - @ByVal(nullValue = "std::optional(c10::nullopt)") LongOptional storage_offset); + @ByVal(nullValue = "std::optional(std::nullopt)") LongOptional storage_offset); public native void set_sizes_and_strides( @ByVal LongArrayRef new_size, @ByVal LongArrayRef new_stride); public native void set_sizes_and_strides( @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] new_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] new_stride, - @ByVal(nullValue = "std::optional(c10::nullopt)") LongOptional storage_offset); + @ByVal(nullValue = "std::optional(std::nullopt)") LongOptional storage_offset); public native void set_sizes_and_strides( @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] new_size, @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long... new_stride); @@ -775,7 +775,7 @@ public native void set_sizes_and_strides( * storage / storage_offset). See NOTE [ Metadata Change for a Detached Tensor * ] for details. */ - public native void set_allow_tensor_metadata_change(@Cast("bool") boolean value); + /** * True if a tensor allows changes to its metadata (e.g. 
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndex.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndex.java index 1a240effeca..94317b2c331 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndex.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorIndex.java @@ -47,10 +47,10 @@ @Namespace("at::indexing") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class TensorIndex extends Pointer { static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public TensorIndex(Pointer p) { super(p); } // Case 1: `at::indexing::None` - public TensorIndex(@ByVal(nullValue = "c10::nullopt_t(at::indexing::None)") @Cast("c10::nullopt_t*") Pointer none) { super((Pointer)null); allocate(none); } - private native void allocate(@ByVal(nullValue = "c10::nullopt_t(at::indexing::None)") @Cast("c10::nullopt_t*") Pointer none); public TensorIndex() { super((Pointer)null); allocate(); } private native void allocate(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptions.java index 11d8b9f21f7..8eceff837b4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptions.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorOptions.java @@ -195,7 +195,7 @@ public class TensorOptions extends Pointer { /** Returns whether the device is specified. */ public native @Cast("bool") @NoException(true) boolean has_device(); - /** Returns the device of the {@code TensorOptions}, or {@code c10::nullopt} if + /** Returns the device of the {@code TensorOptions}, or {@code std::nullopt} if * device is not specified. */ public native @ByVal @NoException(true) DeviceOptional device_opt(); @@ -208,7 +208,7 @@ public class TensorOptions extends Pointer { /** Returns whether the dtype is specified. */ public native @Cast("bool") @NoException(true) boolean has_dtype(); - /** Returns the dtype of the {@code TensorOptions}, or {@code c10::nullopt} if + /** Returns the dtype of the {@code TensorOptions}, or {@code std::nullopt} if * dtype is not specified. */ public native @ByVal @NoException(true) TypeMetaOptional dtype_opt(); @@ -218,7 +218,7 @@ public class TensorOptions extends Pointer { /** Returns whether the layout is specified. */ public native @Cast("bool") @NoException(true) boolean has_layout(); - /** Returns the layout of the {@code TensorOptions}, or {@code c10::nullopt} if + /** Returns the layout of the {@code TensorOptions}, or {@code std::nullopt} if * layout is not specified. */ public native @ByVal @NoException(true) LayoutOptional layout_opt(); @@ -229,7 +229,7 @@ public class TensorOptions extends Pointer { public native @Cast("bool") @NoException(true) boolean has_requires_grad(); /** Returns the {@code requires_grad} property of the {@code TensorOptions}, or - * {@code c10::nullopt} if {@code requires_grad} is not specified. */ + * {@code std::nullopt} if {@code requires_grad} is not specified. */ public native @ByVal @NoException(true) BoolOptional requires_grad_opt(); /** Returns the {@code pinned_memory} property of the {@code TensorOptions}. */ @@ -251,7 +251,7 @@ public class TensorOptions extends Pointer { public native @Cast("bool") boolean type_equal(@Const @ByRef TensorOptions other); /** Returns the {@code pinned_memory} property of the {@code TensorOptions}, or - * {@code c10::nullopt} if {@code pinned_memory} is not specified. 
*/ + * {@code std::nullopt} if {@code pinned_memory} is not specified. */ public native @ByVal @NoException(true) BoolOptional pinned_memory_opt(); /** Returns whether the {@code memory_layout} is specified */ @@ -261,7 +261,7 @@ public class TensorOptions extends Pointer { // behavior of memory_format varies from function to function. /** Returns the {@code memory_layout} property of {@code TensorOptions, or - * }c10::nullopt{@code if }memory_format{@code is not specified. */ + * }std::nullopt{@code if }memory_format{@code is not specified. */ public native @ByVal @NoException(true) MemoryFormatOptional memory_format_opt(); // Resolves the ATen backend specified by the current construction axes. diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorDict.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorDict.java index 6ed41eda394..fcb605a7460 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorDict.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TensorTensorDict.java @@ -143,7 +143,7 @@ public class TensorTensorDict extends Pointer { public native @Cast("bool") boolean is(@Const @ByRef TensorTensorDict rhs); // private API for now because the return type will change to TypePtr - // instead of optional once types are mandatory. + // instead of std::optional once types are mandatory. public native @ByVal Type.TypePtr keyType(); public native @ByVal Type.TypePtr valueType(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImplCloneable.java index d7e5da13983..9f7d62857c4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ThresholdImplCloneable.java @@ -33,6 +33,6 @@ public class ThresholdImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TraceState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TraceState.java index 86a84676841..6ef3ce9109c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TraceState.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TraceState.java @@ -25,17 +25,13 @@ public class TraceState extends Pointer { /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ public TraceState(Pointer p) { super(p); } - public TraceState( - @StdVector SymIntOptional ss, - @Cast("size_t") long num_outputs) { super((Pointer)null); allocate(ss, num_outputs); } - private native void allocate( - @StdVector SymIntOptional ss, - @Cast("size_t") long num_outputs); + public TraceState(@ByRef(true) SymIntOptionalVector ss, @Cast("size_t") long num_outputs) { super((Pointer)null); allocate(ss, num_outputs); } + private native void allocate(@ByRef(true) SymIntOptionalVector ss, @Cast("size_t") long num_outputs); public native void debug_asserts(); public native @ByVal SymIntOptional next_sym_size(); public native @Cast("size_t") long sym_sizes_index(); public native TraceState sym_sizes_index(long setter); - public native @StdVector SymIntOptional sym_sizes(); public native TraceState sym_sizes(SymIntOptional setter); + public native @ByRef SymIntOptionalVector sym_sizes(); public native TraceState sym_sizes(SymIntOptionalVector setter); public native @ByRef TensorVector outputs(); public native TraceState outputs(TensorVector setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImplCloneable.java index d2c08206183..5b5955154a0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderImplCloneable.java @@ -33,6 +33,6 @@ public class TransformerDecoderImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImplCloneable.java index c0f4ca176b0..7deae38c679 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerDecoderLayerImplCloneable.java @@ -33,6 +33,6 @@ public class TransformerDecoderLayerImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImplCloneable.java index 460c9246c31..655194c38f8 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderImplCloneable.java @@ -33,6 +33,6 @@ public class TransformerEncoderImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImplCloneable.java index 17d2da73ac3..55d65fc1df0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerEncoderLayerImplCloneable.java @@ -33,6 +33,6 @@ public class TransformerEncoderLayerImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImplCloneable.java index ee4342c4bbf..1e0ace8454d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TransformerImplCloneable.java @@ -33,6 +33,6 @@ public class TransformerImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImplCloneable.java index 13f64617d71..39be8a07c5c 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginLossImplCloneable.java @@ -33,6 +33,6 @@ public class TripletMarginLossImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImplCloneable.java index 2658e6b65ef..20c104ff0b0 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/TripletMarginWithDistanceLossImplCloneable.java @@ -33,6 +33,6 @@ public class TripletMarginWithDistanceLossImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UndefinedTensorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UndefinedTensorImpl.java index f6c0e9db1e8..fa3a08eba32 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UndefinedTensorImpl.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UndefinedTensorImpl.java @@ -33,6 +33,8 @@ public class UndefinedTensorImpl extends TensorImpl { // #ifdef _WIN32 // #else public static native @Const TensorImpl singleton(); +// #endif + // #ifdef DEBUG public native @Cast("bool") boolean has_storage(); // #endif diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImplCloneable.java index 54d785a7a7b..f63da29fd0d 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnflattenImplCloneable.java @@ -33,6 +33,6 @@ public class UnflattenImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImplCloneable.java index e4e50c2e4f9..690ed61c135 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UnfoldImplCloneable.java @@ -33,6 +33,6 @@ public class UnfoldImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Unpickler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Unpickler.java index 8d277373d37..ce42a54310a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Unpickler.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Unpickler.java @@ -36,6 +36,8 @@ public class Unpickler extends Pointer { // type_resolver_ can not return. // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + + // tensors inside the pickle contain meta-data, the raw tensor data is retrieved by calling `read_record`. 
// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImplCloneable.java index b89e656d0fd..5f12d851dde 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/UpsampleImplCloneable.java @@ -33,6 +33,6 @@ public class UpsampleImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Work.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Work.java index 53acc26e275..c863d8a0165 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/Work.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Work.java @@ -33,24 +33,24 @@ public Work( int rank/*=-1*/, OpType opType/*=c10d::OpType::UNKNOWN*/, @Cast("const char*") BytePointer profilingTitle/*=nullptr*/, - @Const @ByRef(nullValue = "std::optional >(c10::nullopt)") TensorVectorOptional inputTensors) { super((Pointer)null); allocate(rank, opType, profilingTitle, inputTensors); } + @Const @ByRef(nullValue = "std::optional >(std::nullopt)") TensorVectorOptional inputTensors) { super((Pointer)null); allocate(rank, opType, profilingTitle, inputTensors); } @IntrusivePtr @Name("c10::make_intrusive") private native void allocate( int rank/*=-1*/, OpType opType/*=c10d::OpType::UNKNOWN*/, @Cast("const char*") BytePointer profilingTitle/*=nullptr*/, - @Const @ByRef(nullValue = "std::optional >(c10::nullopt)") TensorVectorOptional inputTensors); + @Const @ByRef(nullValue = "std::optional >(std::nullopt)") TensorVectorOptional inputTensors); public Work() { super((Pointer)null); allocate(); } @IntrusivePtr @Name("c10::make_intrusive") private native void allocate(); public Work( int rank/*=-1*/, @Cast("c10d::OpType") byte opType/*=c10d::OpType::UNKNOWN*/, String profilingTitle/*=nullptr*/, - @Const @ByRef(nullValue = "std::optional >(c10::nullopt)") TensorVectorOptional inputTensors) { super((Pointer)null); allocate(rank, opType, profilingTitle, inputTensors); } + @Const @ByRef(nullValue = "std::optional >(std::nullopt)") TensorVectorOptional inputTensors) { super((Pointer)null); allocate(rank, opType, profilingTitle, inputTensors); } @IntrusivePtr @Name("c10::make_intrusive") private native void allocate( int rank/*=-1*/, @Cast("c10d::OpType") byte opType/*=c10d::OpType::UNKNOWN*/, String profilingTitle/*=nullptr*/, - @Const @ByRef(nullValue = "std::optional >(c10::nullopt)") TensorVectorOptional inputTensors); + @Const @ByRef(nullValue = "std::optional >(std::nullopt)") TensorVectorOptional inputTensors); // Checks if request has completed. Non-blocking operation. 
public native @Cast("bool") boolean isCompleted(); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/WriteableTensorData.java b/pytorch/src/gen/java/org/bytedeco/pytorch/WriteableTensorData.java index 9ae11fb95b7..e4ea42363d3 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/WriteableTensorData.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/WriteableTensorData.java @@ -19,7 +19,6 @@ import static org.bytedeco.pytorch.global.torch.*; -// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) @Namespace("torch::jit") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) public class WriteableTensorData extends Pointer { static { Loader.load(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/XPUHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/XPUHooksInterface.java index 3bd4380ec6f..7ad069ad428 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/XPUHooksInterface.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/XPUHooksInterface.java @@ -20,7 +20,7 @@ @Namespace("at") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class) -public class XPUHooksInterface extends Pointer { +public class XPUHooksInterface extends AcceleratorHooksInterface { static { Loader.load(); } /** Default native constructor. */ public XPUHooksInterface() { super((Pointer)null); allocate(); } @@ -62,5 +62,7 @@ public class XPUHooksInterface extends Pointer { public native Allocator getPinnedMemoryAllocator(); - public native @Cast("bool") boolean isPinnedPtr(@Const Pointer arg0); + public native @Cast("bool") boolean isPinnedPtr(@Const Pointer data); + + public native @Cast("bool") boolean hasPrimaryContext(@Cast("c10::DeviceIndex") byte device_index); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplCloneable.java index 7426e4c5809..d6fd5bf3da4 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad1dImplCloneable.java @@ -33,6 +33,6 @@ public class ZeroPad1dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplCloneable.java index bdc8728969a..8a691787edc 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad2dImplCloneable.java @@ -33,6 +33,6 @@ public class ZeroPad2dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. 
*/ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplCloneable.java index 33974e55958..6e524ce979a 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplCloneable.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ZeroPad3dImplCloneable.java @@ -33,6 +33,6 @@ public class ZeroPad3dImplCloneable extends Module { * and submodules in the cloned module are different from those in the * original module. */ public native @SharedPtr("torch::nn::Module") @ByVal Module clone( - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") DeviceOptional device); public native @SharedPtr("torch::nn::Module") @ByVal Module clone(); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AnnotationEntry.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AnnotationEntry.java new file mode 100644 index 00000000000..073a2f57c42 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/AnnotationEntry.java @@ -0,0 +1,49 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.javacpp.chrono.*; +import static org.bytedeco.javacpp.global.chrono.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; +import org.bytedeco.cuda.cudart.*; +import static org.bytedeco.cuda.global.cudart.*; +import org.bytedeco.cuda.cublas.*; +import static org.bytedeco.cuda.global.cublas.*; +import org.bytedeco.cuda.cudnn.*; +import static org.bytedeco.cuda.global.cudnn.*; +import org.bytedeco.cuda.cusparse.*; +import static org.bytedeco.cuda.global.cusparse.*; +import org.bytedeco.cuda.cusolver.*; +import static org.bytedeco.cuda.global.cusolver.*; +import org.bytedeco.cuda.cupti.*; +import static org.bytedeco.cuda.global.cupti.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +// Calls made by record_function will save annotations +@Namespace("c10::cuda::CUDACachingAllocator") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class AnnotationEntry extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public AnnotationEntry(Pointer p) { super(p); } + + public AnnotationEntry(byte device, @Cast("c10::approx_time_t") long time) { super((Pointer)null); allocate(device, time); } + private native void allocate(byte device, @Cast("c10::approx_time_t") long time); + + public native void recordUserMetadata(@StdString BytePointer name, @StdString BytePointer value); + public native void recordUserMetadata(@StdString String name, @StdString String value); + + public native byte device_(); public native AnnotationEntry device_(byte setter); + public native @ByRef trace_time_ time_(); public native AnnotationEntry time_(trace_time_ setter); + public native @ByRef ExtraFilesMap metadata_(); public native AnnotationEntry metadata_(ExtraFilesMap setter); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CTCLossDescriptor.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CTCLossDescriptor.java index c255409744c..c7b8f3a3d24 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CTCLossDescriptor.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CTCLossDescriptor.java @@ -53,4 +53,10 @@ public native void setEx( @Cast("cudnnDataType_t") int datatype, @Cast("cudnnLossNormalizationMode_t") int normMode, @Cast("cudnnNanPropagation_t") int gradMode); + public native void set_v8_v9( + @Cast("cudnnDataType_t") int datatype, + @Cast("cudnnLossNormalizationMode_t") int normMode, + @Cast("cudnnNanPropagation_t") int gradMode, + int maxLabelLength); + } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAAllocator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAAllocator.java index be1814cdf8a..fbccfa89c56 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAAllocator.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/CUDAAllocator.java @@ -46,7 +46,8 @@ public class CUDAAllocator extends Allocator { public native void cacheInfo(byte device, @Cast("size_t*") SizeTPointer largestBlock); public native Pointer getBaseAllocation(Pointer ptr, @Cast("size_t*") SizeTPointer size); public native void recordStream(@StdMove DataPtr arg0, @ByVal CUDAStream stream); - public native @ByVal DeviceStats getDeviceStats(byte device); + public native @ByVal DeviceStats getDeviceStats( + byte device); public native void resetAccumulatedStats(byte device); public native void resetPeakStats(byte device); public native @ByVal SnapshotInfo snapshot(); @@ -63,6 +64,7 @@ public native void endAllocateToPool( byte device, @ByVal @Cast("c10::cuda::MempoolId_t*") DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair mempool_id, @Const @ByRef PointerSet expected_live_allocations); + public native @ByVal ShareableHandle shareIpcHandle(Pointer ptr); public native @SharedPtr Pointer getIpcDevPtr(@StdString BytePointer handle); public native @SharedPtr Pointer getIpcDevPtr(@StdString String handle); public native @Cast("bool") boolean isHistoryEnabled(); @@ -76,6 +78,8 @@ public native void recordHistory( @ByVal @Cast("c10::cuda::CUDACachingAllocator::CreateContextFn*") Pointer context_recorder, @Cast("size_t") long alloc_trace_max_entries, @Cast("c10::cuda::CUDACachingAllocator::RecordContext") int when); + public native void recordAnnotation( + @StdVector StringPair md); public native void attachOutOfMemoryObserver(@ByVal @Cast("c10::cuda::CUDACachingAllocator::OutOfMemoryObserver*") AllocatorTraceTracker observer); // Attached AllocatorTraceTracker callbacks will be called while the 
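recordAnnotation and shareIpcHandle are new entry points on this allocator interface. A hedged sketch of attaching one key/value annotation to the allocator trace; how the CUDAAllocator instance is obtained, and the StringPair setters used here, are assumptions rather than part of this patch:

    import org.bytedeco.pytorch.StringPair;
    import org.bytedeco.pytorch.cuda.CUDAAllocator;

    public class AnnotationSketch {
        // Caller supplies the process-wide caching allocator instance.
        static void markPhase(CUDAAllocator allocator) {
            // One std::pair<std::string,std::string>; recordAnnotation maps a
            // std::vector of such pairs through the @StdVector StringPair argument.
            StringPair md = new StringPair();
            md.first("phase");    // assumed @StdString setters on the generated pair
            md.second("forward");
            allocator.recordAnnotation(md);
        }
    }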
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceStats.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceStats.java index 5f123c0051c..de1ae9ba680 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceStats.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/DeviceStats.java @@ -31,7 +31,7 @@ // Struct containing memory allocator summary statistics for a device. -@Namespace("c10::cuda::CUDACachingAllocator") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +@Namespace("c10::CachingDeviceAllocator") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) public class DeviceStats extends Pointer { static { Loader.load(); } /** Default native constructor. */ @@ -50,31 +50,32 @@ public class DeviceStats extends Pointer { } // COUNT: allocations requested by client code - public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") Stat allocation(); public native DeviceStats allocation(Stat setter); - // COUNT: number of allocated segments from cudaMalloc(). - public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") Stat segment(); public native DeviceStats segment(Stat setter); + public native @ByRef @Cast("c10::CachingDeviceAllocator::StatArray*") Stat allocation(); public native DeviceStats allocation(Stat setter); + // COUNT: number of allocated segments from device memory allocation. + public native @ByRef @Cast("c10::CachingDeviceAllocator::StatArray*") Stat segment(); public native DeviceStats segment(Stat setter); // COUNT: number of active memory blocks (allocated or used by stream) - public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") Stat active(); public native DeviceStats active(Stat setter); + public native @ByRef @Cast("c10::CachingDeviceAllocator::StatArray*") Stat active(); public native DeviceStats active(Stat setter); // COUNT: number of inactive, split memory blocks (unallocated but can't be - // released via cudaFree) - public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") Stat inactive_split(); public native DeviceStats inactive_split(Stat setter); + // released via device memory deallocation) + public native @ByRef @Cast("c10::CachingDeviceAllocator::StatArray*") Stat inactive_split(); public native DeviceStats inactive_split(Stat setter); // SUM: bytes allocated by this memory allocator - public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") Stat allocated_bytes(); public native DeviceStats allocated_bytes(Stat setter); + public native @ByRef @Cast("c10::CachingDeviceAllocator::StatArray*") Stat allocated_bytes(); public native DeviceStats allocated_bytes(Stat setter); // SUM: bytes reserved by this memory allocator (both free and used) - public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") Stat reserved_bytes(); public native DeviceStats reserved_bytes(Stat setter); + public native @ByRef @Cast("c10::CachingDeviceAllocator::StatArray*") Stat reserved_bytes(); public native DeviceStats reserved_bytes(Stat setter); // SUM: bytes within active memory blocks - public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") Stat active_bytes(); public native DeviceStats active_bytes(Stat setter); + public native @ByRef @Cast("c10::CachingDeviceAllocator::StatArray*") Stat active_bytes(); public native DeviceStats active_bytes(Stat setter); // SUM: bytes within inactive, split memory blocks - public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") Stat inactive_split_bytes(); public native DeviceStats inactive_split_bytes(Stat setter); 
+ public native @ByRef @Cast("c10::CachingDeviceAllocator::StatArray*") Stat inactive_split_bytes(); public native DeviceStats inactive_split_bytes(Stat setter); // SUM: bytes requested by client code - public native @ByRef @Cast("c10::cuda::CUDACachingAllocator::StatArray*") Stat requested_bytes(); public native DeviceStats requested_bytes(Stat setter); + public native @ByRef @Cast("c10::CachingDeviceAllocator::StatArray*") Stat requested_bytes(); public native DeviceStats requested_bytes(Stat setter); - // COUNT: total number of failed calls to CUDA malloc necessitating cache + // COUNT: total number of failed calls to device malloc necessitating cache // flushes. public native @Cast("int64_t") long num_alloc_retries(); public native DeviceStats num_alloc_retries(long setter); - // COUNT: total number of OOMs (i.e. failed calls to CUDA after cache flush) + // COUNT: total number of OOMs (i.e. failed calls to device memory allocation + // after cache flush) public native @Cast("int64_t") long num_ooms(); public native DeviceStats num_ooms(long setter); // COUNT: total number of oversize blocks allocated from pool @@ -86,12 +87,12 @@ public class DeviceStats extends Pointer { // COUNT: total number of synchronize_and_free_events() calls public native @Cast("int64_t") long num_sync_all_streams(); public native DeviceStats num_sync_all_streams(long setter); - // COUNT: total number of CUDA allocation calls. This includes both cuMemMap - // and cudaMalloc. + // COUNT: total number of device memory allocation calls. This includes both + // mapped and malloced memory. public native @Cast("int64_t") long num_device_alloc(); public native DeviceStats num_device_alloc(long setter); - // COUNT: total number of CUDA free calls. This includes both cuMemUnmap - // and cudaFree. + // COUNT: total number of device memory deallocation calls. This includes both + // un-mapped and free memory. public native @Cast("int64_t") long num_device_free(); public native DeviceStats num_device_free(long setter); // SIZE: maximum block size that is allowed to be split. 
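Since DeviceStats and Stat now live in the backend-agnostic c10::CachingDeviceAllocator namespace, reading the counters through these bindings keeps its old shape. A short illustrative sketch, with the device index and field selection chosen arbitrarily (all calls appear in the hunks above):

    import org.bytedeco.pytorch.cuda.CUDAAllocator;
    import org.bytedeco.pytorch.cuda.DeviceStats;
    import org.bytedeco.pytorch.cuda.Stat;

    public class StatsSketch {
        static void printStats(CUDAAllocator allocator, byte device) {
            DeviceStats stats = allocator.getDeviceStats(device); // per-device snapshot
            Stat allocated = stats.allocated_bytes();             // aggregate slot of the StatArray
            System.out.println("current bytes: " + allocated.current()
                    + ", peak bytes: " + allocated.peak());
            System.out.println("alloc retries: " + stats.num_alloc_retries()
                    + ", OOMs: " + stats.num_ooms());
        }
    }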
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/MemPool.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/MemPool.java new file mode 100644 index 00000000000..61361a94561 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/MemPool.java @@ -0,0 +1,65 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.javacpp.chrono.*; +import static org.bytedeco.javacpp.global.chrono.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; +import org.bytedeco.cuda.cudart.*; +import static org.bytedeco.cuda.global.cudart.*; +import org.bytedeco.cuda.cublas.*; +import static org.bytedeco.cuda.global.cublas.*; +import org.bytedeco.cuda.cudnn.*; +import static org.bytedeco.cuda.global.cudnn.*; +import org.bytedeco.cuda.cusparse.*; +import static org.bytedeco.cuda.global.cusparse.*; +import org.bytedeco.cuda.cusolver.*; +import static org.bytedeco.cuda.global.cusolver.*; +import org.bytedeco.cuda.cupti.*; +import static org.bytedeco.cuda.global.cupti.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +// MemPool represents a pool of memory in a caching allocator. Currently, +// it's just the ID of the pool object maintained in the CUDACachingAllocator. +// +// An allocator pointer can be passed to the MemPool to define how the +// allocations should be done in the pool. For example: using a different +// system allocator such as ncclMemAlloc. +@Namespace("c10::cuda") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class MemPool extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public MemPool(Pointer p) { super(p); } + /** Native array allocator. Access with {@link Pointer#position(long)}. 
*/ + public MemPool(long size) { super((Pointer)null); allocateArray(size); } + private native void allocateArray(long size); + @Override public MemPool position(long position) { + return (MemPool)super.position(position); + } + @Override public MemPool getPointer(long i) { + return new MemPool((Pointer)this).offsetAddress(i); + } + + public MemPool( + CUDAAllocator allocator/*=nullptr*/, + @Cast("bool") boolean is_user_created/*=true*/) { super((Pointer)null); allocate(allocator, is_user_created); } + private native void allocate( + CUDAAllocator allocator/*=nullptr*/, + @Cast("bool") boolean is_user_created/*=true*/); + public MemPool() { super((Pointer)null); allocate(); } + private native void allocate(); + + public native @ByVal @Cast("c10::cuda::MempoolId_t*") DeviceAssertionsDataVectorCUDAKernelLaunchInfoVectorPair id(); + public native CUDAAllocator allocator(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/MemPoolContext.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/MemPoolContext.java new file mode 100644 index 00000000000..2f6dd5ad302 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/MemPoolContext.java @@ -0,0 +1,54 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.javacpp.chrono.*; +import static org.bytedeco.javacpp.global.chrono.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; +import org.bytedeco.cuda.cudart.*; +import static org.bytedeco.cuda.global.cudart.*; +import org.bytedeco.cuda.cublas.*; +import static org.bytedeco.cuda.global.cublas.*; +import org.bytedeco.cuda.cudnn.*; +import static org.bytedeco.cuda.global.cudnn.*; +import org.bytedeco.cuda.cusparse.*; +import static org.bytedeco.cuda.global.cusparse.*; +import org.bytedeco.cuda.cusolver.*; +import static org.bytedeco.cuda.global.cusolver.*; +import org.bytedeco.cuda.cupti.*; +import static org.bytedeco.cuda.global.cupti.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +// MemPoolContext holds the currently active pool and stashes the previous +// pool. On deletion it makes the previous pool active. +@Namespace("c10::cuda") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class MemPoolContext extends Pointer { + static { Loader.load(); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */ + public MemPoolContext(Pointer p) { super(p); } + + public MemPoolContext(MemPool mempool) { super((Pointer)null); allocate(mempool); } + private native void allocate(MemPool mempool); + + // getActiveMemPool() can be used to get the currently active pool. 
+ // For instance: in CUDACachingAllocator, we can route allocations + // to a user provided allocator, by doing: + // + // auto active_pool = MemPoolContext::getActiveMemPool(); + // if (active_pool && active_pool->allocator()) { + // ptr = active_pool->allocator()->raw_alloc(size); + // } + // + public static native MemPool getActiveMemPool(); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ShareableHandle.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ShareableHandle.java new file mode 100644 index 00000000000..ef1b7676692 --- /dev/null +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ShareableHandle.java @@ -0,0 +1,53 @@ +// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE + +package org.bytedeco.pytorch.cuda; + +import org.bytedeco.pytorch.Allocator; +import java.nio.*; +import org.bytedeco.javacpp.*; +import org.bytedeco.javacpp.annotation.*; + +import static org.bytedeco.javacpp.presets.javacpp.*; +import static org.bytedeco.openblas.global.openblas_nolapack.*; +import static org.bytedeco.openblas.global.openblas.*; +import org.bytedeco.javacpp.chrono.*; +import static org.bytedeco.javacpp.global.chrono.*; +import org.bytedeco.pytorch.*; +import static org.bytedeco.pytorch.global.torch.*; +import org.bytedeco.cuda.cudart.*; +import static org.bytedeco.cuda.global.cudart.*; +import org.bytedeco.cuda.cublas.*; +import static org.bytedeco.cuda.global.cublas.*; +import org.bytedeco.cuda.cudnn.*; +import static org.bytedeco.cuda.global.cudnn.*; +import org.bytedeco.cuda.cusparse.*; +import static org.bytedeco.cuda.global.cusparse.*; +import org.bytedeco.cuda.cusolver.*; +import static org.bytedeco.cuda.global.cusolver.*; +import org.bytedeco.cuda.cupti.*; +import static org.bytedeco.cuda.global.cupti.*; + +import static org.bytedeco.pytorch.global.torch_cuda.*; + + +@Namespace("c10::cuda::CUDACachingAllocator") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +public class ShareableHandle extends Pointer { + static { Loader.load(); } + /** Default native constructor. */ + public ShareableHandle() { super((Pointer)null); allocate(); } + /** Native array allocator. Access with {@link Pointer#position(long)}. */ + public ShareableHandle(long size) { super((Pointer)null); allocateArray(size); } + /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. 
*/ + public ShareableHandle(Pointer p) { super(p); } + private native void allocate(); + private native void allocateArray(long size); + @Override public ShareableHandle position(long position) { + return (ShareableHandle)super.position(position); + } + @Override public ShareableHandle getPointer(long i) { + return new ShareableHandle((Pointer)this).offsetAddress(i); + } + + public native @Cast("ptrdiff_t") long offset(); public native ShareableHandle offset(long setter); + public native @StdString BytePointer handle(); public native ShareableHandle handle(BytePointer setter); +} diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SnapshotInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SnapshotInfo.java index 17274d99d4a..a8f37590f0e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SnapshotInfo.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/SnapshotInfo.java @@ -50,5 +50,6 @@ public class SnapshotInfo extends Pointer { public native @StdVector SegmentInfo segments(); public native SnapshotInfo segments(SegmentInfo setter); public native @StdVector TraceEntryVector device_traces(); public native SnapshotInfo device_traces(TraceEntryVector setter); + public native @StdVector AnnotationEntry external_annotations(); public native SnapshotInfo external_annotations(AnnotationEntry setter); public native @ByRef AllocatorConfigInfo config_metadata(); public native SnapshotInfo config_metadata(AllocatorConfigInfo setter); } diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Stat.java b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Stat.java index 0d9899e69de..1b9961ed273 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Stat.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/Stat.java @@ -30,7 +30,7 @@ import static org.bytedeco.pytorch.global.torch_cuda.*; -@Namespace("c10::cuda::CUDACachingAllocator") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) +@Namespace("c10::CachingDeviceAllocator") @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class) public class Stat extends Pointer { static { Loader.load(); } /** Default native constructor. 
*/ @@ -48,6 +48,14 @@ public class Stat extends Pointer { return new Stat((Pointer)this).offsetAddress(i); } + public native void increase(@Cast("size_t") long amount); + + public native void decrease(@Cast("size_t") long amount); + + public native void reset_accumulated(); + + public native void reset_peak(); + public native @Cast("int64_t") long current(); public native Stat current(long setter); public native @Cast("int64_t") long peak(); public native Stat peak(long setter); public native @Cast("int64_t") long allocated(); public native Stat allocated(long setter); diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java index fa1e8fbf33a..f5317e2549e 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch.java @@ -78,6 +78,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../StringSharedModuleDictItemVector.java +// Targeting ../SymIntOptionalVector.java + + // Targeting ../BoolVector.java @@ -246,6 +249,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../EnumNameValue.java +// Targeting ../StringPair.java + + // Targeting ../IntPair.java @@ -339,6 +345,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../StringIValueMap.java +// Targeting ../StringScalarTypeMap.java + + // Targeting ../StringValueMap.java @@ -670,7 +679,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #define C10_BUILD_SHARED_LIBS /* #undef C10_USE_GLOG */ /* #undef C10_USE_GFLAGS */ -/* #undef C10_USE_NUMA */ +// #define C10_USE_NUMA /* #undef C10_USE_MSVC_STATIC_RUNTIME */ /* #undef C10_USE_ROCM_KERNEL_ASSERT */ @@ -912,6 +921,24 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #if !defined(C10_ASAN_ENABLED) // #endif +// Detect undefined-behavior sanitizer (UBSAN) +// #undef C10_UBSAN_ENABLED + +// for clang or gcc >= 14 +// NB: gcc 14 adds support for Clang's __has_feature +// https://gcc.gnu.org/gcc-14/changes.html +// gcc < 14 doesn't have a macro for UBSAN +// (e.g. __SANITIZE_UNDEFINED__ does not exist in gcc) +// https://github.com/google/sanitizers/issues/765 +// #if defined(__has_feature) +// #if ((__has_feature(undefined_behavior_sanitizer))) +public static final int C10_UBSAN_ENABLED = 1; +// #endif +// #endif + +// #if !defined(C10_UBSAN_ENABLED) +// #endif + // Disable the copy and assignment operator for a class. Note that this will // disable the usage of the class in std containers. 
// #define C10_DISABLE_COPY_AND_ASSIGN(classname) @@ -1103,6 +1130,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #if defined(__ANDROID__) || defined(__APPLE__) || defined(__FreeBSD__) // Those platforms do not support assert() // #define CUDA_KERNEL_ASSERT(cond) +// #define CUDA_KERNEL_ASSERT_MSG(cond, msg) // #define SYCL_KERNEL_ASSERT(cond) // #elif defined(_MSC_VER) // #else // __APPLE__, _MSC_VER @@ -1111,6 +1139,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // ROCm disable kernel assert by default // #if !defined(C10_USE_ROCM_KERNEL_ASSERT) and defined(USE_ROCM) // #define CUDA_KERNEL_ASSERT(cond) +// #define CUDA_KERNEL_ASSERT_MSG(cond, msg) // #define SYCL_KERNEL_ASSERT(cond) // #else // #define CUDA_KERNEL_ASSERT(cond) @@ -1118,6 +1147,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // __assert_fail( // #cond, __FILE__, static_cast(__LINE__), __func__); // } +// #define CUDA_KERNEL_ASSERT_MSG(cond, msg) +// if (C10_UNLIKELY(!(cond))) { +// __assert_fail( +// msg, __FILE__, static_cast(__LINE__), __func__); +// } // #define SYCL_KERNEL_ASSERT(cond) // if (C10_UNLIKELY(!(cond))) { // __assert_fail( @@ -2285,10 +2319,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // element we can return for cases when a DispatchKeySet contains no elements. // You can think a more semantically accurate definition of DispatchKey is: // - // using DispatchKey = optional + // using DispatchKey = std::optional // // and Undefined == nullopt. We didn't actually represent - // it this way because optional would take two + // it this way because std::optional would take two // words, when DispatchKey fits in eight bits. Undefined((short)(0)), @@ -2505,50 +2539,51 @@ public class torch extends org.bytedeco.pytorch.presets.torch { AutocastXLA((short)(Undefined.value + 30)), // AutocastXLA is only being used for TPUs. XLA GPUs continue to use // AutocastCUDA. - AutocastCUDA((short)(Undefined.value + 31)), - AutocastPrivateUse1((short)(Undefined.value + 32)), + AutocastMPS((short)(Undefined.value + 31)), + AutocastCUDA((short)(Undefined.value + 32)), + AutocastPrivateUse1((short)(Undefined.value + 33)), // ~~~~~~~~~~~~~~~~~~~~~~~~~~~ WRAPPERS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // // There are a number of alternative modes which may want to handle before // autograd; for example, error checking, tracing, profiling or vmap. They // go here. - FuncTorchBatched((short)(Undefined.value + 33)), // See Note [Out-of-tree vmap+grad prototype] + FuncTorchBatched((short)(Undefined.value + 34)), // See Note [Out-of-tree vmap+grad prototype] // Dispatch key for BatchedTensorImpl wrapping a nested tensor. - BatchedNestedTensor((short)(Undefined.value + 34)), + BatchedNestedTensor((short)(Undefined.value + 35)), - FuncTorchVmapMode((short)(Undefined.value + 35)), // See Note [Out-of-tree vmap+grad prototype] + FuncTorchVmapMode((short)(Undefined.value + 36)), // See Note [Out-of-tree vmap+grad prototype] // This is the dispatch key for BatchedTensorImpl, which is used to implement // batching rules for vmap. - Batched((short)(Undefined.value + 36)), + Batched((short)(Undefined.value + 37)), // When we are inside a vmap, all tensors dispatch on this key. // See Note: [DispatchKey::VmapMode usage] for more details. 
- VmapMode((short)(Undefined.value + 37)), + VmapMode((short)(Undefined.value + 38)), - FuncTorchGradWrapper((short)(Undefined.value + 38)), // See Note [Out-of-tree vmap+grad prototype] + FuncTorchGradWrapper((short)(Undefined.value + 39)), // See Note [Out-of-tree vmap+grad prototype] // Out-of-core key for Deferred Module Initialization in torchdistx. // See https://pytorch.org/torchdistx/latest/deferred_init.html - DeferredInit((short)(Undefined.value + 39)), + DeferredInit((short)(Undefined.value + 40)), // Used by Python key logic to know the set of tls on entry to the dispatcher // This kernel assumes it is the top-most non-functorch-related DispatchKey. // If you add a key above, make sure to update the fallback implementation for // this. - PythonTLSSnapshot((short)(Undefined.value + 40)), + PythonTLSSnapshot((short)(Undefined.value + 41)), // This key should be at the very top of the dispatcher - FuncTorchDynamicLayerFrontMode((short)(Undefined.value + 41)), // See Note [Out-of-tree vmap+grad prototype] + FuncTorchDynamicLayerFrontMode((short)(Undefined.value + 42)), // See Note [Out-of-tree vmap+grad prototype] // TESTING: This is intended to be a generic testing tensor type id. // Don't use it for anything real; its only acceptable use is within a single // process test. Use it by creating a TensorImpl with this DispatchKey, and // then registering operators to operate on this type id. See // aten/src/ATen/core/dispatch/backend_fallback_test.cpp for a usage example. - TESTING_ONLY_GenericWrapper((short)(Undefined.value + 42)), + TESTING_ONLY_GenericWrapper((short)(Undefined.value + 43)), // TESTING: This is intended to be a generic testing tensor type id. // Don't use it for anything real; its only acceptable use is within a single @@ -2557,51 +2592,51 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // to operate on this type id. See // aten/src/ATen/core/dispatch/backend_fallback_test.cpp // for a usage example - TESTING_ONLY_GenericMode((short)(Undefined.value + 43)), + TESTING_ONLY_GenericMode((short)(Undefined.value + 44)), // This key is used for pre-dispatch tracing in make_fx. // It has lower priority than the PythonDispatcher key // because we use the PythonDispatcher to intercept the key from python, // and avoid having to implement it in C++. 
- PreDispatch((short)(Undefined.value + 44)), + PreDispatch((short)(Undefined.value + 45)), // This is a bypass that allows you to skip running the C++ dispatcher // entirely - PythonDispatcher((short)(Undefined.value + 45)), + PythonDispatcher((short)(Undefined.value + 46)), // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ FIN ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ // - EndOfFunctionalityKeys((short)(Undefined.value + 46)), + EndOfFunctionalityKeys((short)(Undefined.value + 47)), - StartOfDenseBackends((short)(Undefined.value + 47)), - CPU((short)(Undefined.value + 48)), + StartOfDenseBackends((short)(Undefined.value + 48)), + CPU((short)(Undefined.value + 49)), - CUDA((short)(Undefined.value + 49)), + CUDA((short)(Undefined.value + 50)), - HIP((short)(Undefined.value + 50)), + HIP((short)(Undefined.value + 51)), - XLA((short)(Undefined.value + 51)), + XLA((short)(Undefined.value + 52)), - MPS((short)(Undefined.value + 52)), + MPS((short)(Undefined.value + 53)), - IPU((short)(Undefined.value + 53)), + IPU((short)(Undefined.value + 54)), - XPU((short)(Undefined.value + 54)), + XPU((short)(Undefined.value + 55)), - HPU((short)(Undefined.value + 55)), + HPU((short)(Undefined.value + 56)), - VE((short)(Undefined.value + 56)), + VE((short)(Undefined.value + 57)), - Lazy((short)(Undefined.value + 57)), + Lazy((short)(Undefined.value + 58)), - MTIA((short)(Undefined.value + 58)), + MTIA((short)(Undefined.value + 59)), - PrivateUse1((short)(Undefined.value + 59)), + PrivateUse1((short)(Undefined.value + 60)), - PrivateUse2((short)(Undefined.value + 60)), + PrivateUse2((short)(Undefined.value + 61)), - PrivateUse3((short)(Undefined.value + 61)), + PrivateUse3((short)(Undefined.value + 62)), - Meta((short)(Undefined.value + 62)), + Meta((short)(Undefined.value + 63)), EndOfDenseBackends((short)(0)), StartOfQuantizedBackends((short)(1)), QuantizedCPU((short)(2)), @@ -3314,7 +3349,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // for a given backend key, use the associated autograd key. // for non-backend keys, use AutogradOther as a default. // Note: it's convenient and fast to return a default here rather than (say) -// returning an optional, or throwing. But it makes callers +// returning an std::optional, or throwing. But it makes callers // responsible for either a) enforcing the invariant that only backend keys // be passed as arguments, or b) interpreting our return value carefully. 
@Namespace("c10") public static native @ByVal DispatchKeySet getAutogradRelatedKeySetFromBackend(BackendComponent t); @@ -4076,6 +4111,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #if defined(__CUDACC__) && !defined(USE_ROCM) // #endif +// #if defined(__HIPCC__) && defined(USE_ROCM) +// #endif // #if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS) // #endif @@ -4121,6 +4158,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #if defined(__CUDACC__) && !defined(USE_ROCM) // #endif +// #if defined(__HIPCC__) && defined(USE_ROCM) +// #endif // defined(__HIPCC__) && defined(USE_ROCM) // #if defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS) // #endif @@ -4317,7 +4356,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once // #include -// #include +// #include // #include // #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") @@ -4456,9 +4495,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { * and inspired by Half implementation from pytorch/c10/util/Half.h */ // #include -// #include // #include -// #include // #if defined(__cplusplus) // #include @@ -4473,16 +4510,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #endif // #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include -// #include - -// #include // operator typeid +// #include /* * Convert a 8-bit floating-point number in fp8 E4M3FN format, in bit @@ -4680,8 +4708,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { * Implementation based on the paper https://arxiv.org/pdf/2206.02915.pdf and * the existing Float8_e4m3fn implementation. */ +// #include // #include -// #include // #include // #include @@ -4984,7 +5012,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #if C10_CLANG_HAS_WARNING("-Wimplicit-int-float-conversion") // #endif -// #if defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__) +// #if defined(__aarch64__) && !defined(__CUDACC__) // #else @@ -4993,7 +5021,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { -// #endif /* !defined(__aarch64__) || defined(C10_MOBILE) || defined(__CUDACC__) \ +// #endif /* !defined(__aarch64__) || defined(__CUDACC__) \ // */ // #if defined(__CUDACC__) || defined(__HIPCC__) @@ -5150,7 +5178,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // for SYCL 2020 // #endif -// #if defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__) +// #if defined(__aarch64__) && !defined(__CUDACC__) // #endif /* @@ -5184,7 +5212,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { */ @Namespace("c10::detail") public static native @Cast("uint16_t") short fp16_ieee_from_fp32_value(float f); -// #if defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__) +// #if defined(__aarch64__) && !defined(__CUDACC__) // #endif @@ -5708,6 +5736,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // dummy struct for uint1 to uint7, actual functionality // of these dtypes will be implemented in python with Tensor subclass @@ -6101,6 +6130,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Cast("std::ostream*") @ByRef Pointer stream, ScalarType scalar_type); +// Returns a pair of strings representing the names for each dtype. 
+// The returned pair is (name, legacy_name_if_applicable) +@Namespace("c10") public static native @ByVal StringPair getDtypeNames( + ScalarType scalarType); + +// Returns a map of string name to dtype. +@Namespace("c10") public static native @Const @ByRef StringScalarTypeMap getStringToDtypeMap(); + // namespace c10 @@ -6142,9 +6179,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include // #include // #include +// #include // #include // #include // Targeting ../SymNode.java @@ -6184,9 +6221,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include // #include // #include +// #include // #include // #include // Targeting ../SymBool.java @@ -6200,17 +6237,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #define TORCH_SYM_INTERNAL_ASSERT(cond, ...) // TORCH_INTERNAL_ASSERT((cond).expect_true(__FILE__, __LINE__), __VA_ARGS__) -@Namespace("c10") public static native @Cast("bool") boolean guard_size_oblivious(@Cast("bool") boolean b, @Cast("const char*") BytePointer file, @Cast("int64_t") long line); -@Namespace("c10") public static native @Cast("bool") boolean guard_size_oblivious(@Cast("bool") boolean b, String file, @Cast("int64_t") long line); -@Namespace("c10") public static native @Cast("bool") boolean guard_size_oblivious( - @Const @ByRef SymBool b, - @Cast("const char*") BytePointer file, - @Cast("int64_t") long line); -@Namespace("c10") public static native @Cast("bool") boolean guard_size_oblivious( - @Const @ByRef SymBool b, - String file, - @Cast("int64_t") long line); + + // #define TORCH_GUARD_SIZE_OBLIVIOUS(cond) // c10::guard_size_oblivious((cond), __FILE__, __LINE__) @@ -6232,6 +6261,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include +// #include // #include // #include // Targeting ../SymInt.java @@ -7878,9 +7908,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include +// #include // #include -// #include +// #include // #include +// #include @Namespace("c10") public static native @ByVal LongArrayRef asIntArrayRefUnchecked(@ByVal SymIntArrayRef ar); @@ -7895,7 +7927,20 @@ public class torch extends org.bytedeco.pytorch.presets.torch { +// Even slower than asIntArrayRefSlow, as it forces an allocation for a +// destination int, BUT it is able to force specialization (it never errors) +@Namespace("c10") public static native @ByVal DimVector asIntArrayRefSlowAlloc( + @ByVal SymIntArrayRef ar, + @Cast("const char*") BytePointer file, + @Cast("int64_t") long line); +@Namespace("c10") public static native @ByVal DimVector asIntArrayRefSlowAlloc( + @ByVal SymIntArrayRef ar, + String file, + @Cast("int64_t") long line); + // #define C10_AS_INTARRAYREF_SLOW(a) c10::asIntArrayRefSlow(a, __FILE__, __LINE__) +// #define C10_AS_INTARRAYREF_SLOW_ALLOC(a) +// c10::asIntArrayRefSlowAlloc(a, __FILE__, __LINE__) // Prefer using a more semantic constructor, like // fromIntArrayRefKnownNonNegative @@ -7942,7 +7987,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include // #include // #include @@ -8120,8 +8165,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// #include // #include +// #include // #include @@ -8478,6 +8523,17 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // CAFFE_ENFORCE_BINARY_OP_WITH_CALLER( // 
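The getDtypeNames/getStringToDtypeMap bindings added just above expose the mapping between ScalarType values and their Python-facing dtype names. A hedged usage sketch; the first()/second() accessors on StringPair are assumed from the usual JavaCPP std::pair mapping and are not verified against the generated class:

    import org.bytedeco.pytorch.StringPair;
    import org.bytedeco.pytorch.global.torch;
    import org.bytedeco.pytorch.global.torch.ScalarType;

    public class DtypeNames {
        public static void main(String[] args) {
            // Returns (name, legacy_name_if_applicable), e.g. ("float32", "float").
            StringPair names = torch.getDtypeNames(ScalarType.Float);
            System.out.println(names.first().getString() + " / " + names.second().getString());
        }
    }

getStringToDtypeMap() goes the other way, resolving a dtype name string back to its ScalarType.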
std::greater(), >, x, y, ##__VA_ARGS__) +// #define C10_LOG_EVENT_SAMPLED(event, ...) +// static const std::unique_ptr<::c10::EventSampledHandler>& +// _##event##EventSampledHandler = ::c10::GetEventSampledHandler(#event); +// if (_##event##EventSampledHandler) { +// _##event##EventSampledHandler->log(__VA_ARGS__); +// } + +// Must be called in the main thread before any other threads are spawned. + + + /** * Very lightweight logging for the first time API usage. It's beneficial for * tracking of individual functionality usage in larger applications. @@ -8621,12 +8677,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include // #include // #include // #include // #include // #include +// #include // #include // #include @@ -9050,7 +9106,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// #include +// #include // #include @Namespace("at") public enum NameType { BASIC((byte)(0)), WILDCARD((byte)(1)); @@ -9283,7 +9339,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include // #include // #include @@ -9800,7 +9856,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include // #include // #include // #include @@ -12746,7 +12802,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include // #define C10_FORALL_TYPES(_) // _(AnyType) @@ -12845,7 +12901,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("c10") public static native String typeKindToString(@Cast("c10::TypeKind") int kind); // Use this to customize how a Type is printed using `annotation_str()`. If -// c10::nullopt is returned, `annotation_str()` falls through to its default +// std::nullopt is returned, `annotation_str()` falls through to its default // implementation. // namespace detail // #define TORCH_DECLARE_SINGLETON(Type) @@ -12956,7 +13012,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// #include +// #include // #define DYNAMIC_TYPE_BIT(x) (1 << x) @Namespace("c10") @MemberGetter public static native @Cast("const c10::DynamicTypeBits") int kDynamicCovariantTypeBit(); @@ -13220,7 +13276,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include // #include // #include @@ -13294,7 +13350,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include // #include // #include // #include @@ -13833,7 +13889,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { * This device guard does NOT have an uninitialized state; it is guaranteed * to reset a device on exit. If you are in a situation where you *might* * want to setup a guard (i.e., are looking for the moral equivalent - * of optional), see OptionalDeviceGuard. */ + * of std::optional), see OptionalDeviceGuard. */ // Targeting ../OptionalDeviceGuard.java @@ -14019,6 +14075,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include +// #include // #include // #include @@ -14247,7 +14304,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // generic_to converts an IValue from a generic list or generic dict -// to a concrete list/dict type likelike List, Dict<...> or optional. 
+// to a concrete list/dict type likelike List, Dict<...> or std::optional. // Note that in the case of lists, this only works for IValue-based lists, // i.e. not for int64_t, double, ... // generic_to is an implementation detail of IValue::to and not @@ -14420,7 +14477,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // namespace ivalue // This is an owning wrapper for a std::optional> -// that can be implicitly converted to a (non-owning) optional>. +// that can be implicitly converted to a (non-owning) std::optional>. // Its purpose is to be used in generated code to keep the vector alive // either until the end of a statement (as a temporary), or as a saved arg // in autograd. @@ -14607,9 +14664,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { - - -@Namespace("c10::impl") public static native @Const IValue ptr_to_first_element(@Const @ByRef GenericList list); @@ -14627,7 +14681,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include // #include @@ -14682,6 +14736,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { + // Targeting ../DoubleComplexList.java @@ -15233,7 +15288,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// #include @Namespace("at") public static native @Cast("bool") boolean has_names(@Const @ByRef TensorArrayRef tensors); @Namespace("at") public static native @Cast("bool") boolean has_names(@Const @ByRef TensorVector tensors); @@ -15646,7 +15700,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("torch::autograd") public static native void backward( @Const @ByRef TensorVector tensors, @Const @ByRef(nullValue = "torch::autograd::variable_list{}") TensorVector grad_tensors, - @ByVal(nullValue = "std::optional(c10::nullopt)") BoolOptional retain_graph, + @ByVal(nullValue = "std::optional(std::nullopt)") BoolOptional retain_graph, @Cast("bool") boolean create_graph/*=false*/, @Const @ByRef(nullValue = "torch::autograd::variable_list{}") TensorVector inputs); @Namespace("torch::autograd") public static native void backward( @@ -15682,7 +15736,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Const @ByRef TensorVector outputs, @Const @ByRef TensorVector inputs, @Const @ByRef(nullValue = "torch::autograd::variable_list{}") TensorVector grad_outputs, - @ByVal(nullValue = "std::optional(c10::nullopt)") BoolOptional retain_graph, + @ByVal(nullValue = "std::optional(std::nullopt)") BoolOptional retain_graph, @Cast("bool") boolean create_graph/*=false*/, @Cast("bool") boolean allow_unused/*=false*/); @Namespace("torch::autograd") public static native @ByVal TensorVector grad( @@ -15732,11 +15786,11 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// #include // #include +// #include +// #include // #include // #include -// #include // Targeting ../OperatorName.java @@ -15753,7 +15807,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("c10") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer arg0, @Const @ByRef OperatorName arg1); // namespace c10 - + // namespace std // Parsed from ATen/core/dispatch/OperatorOptions.h @@ -15849,37 +15903,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// note: windows build doesn't find symbols in operator files unless -// this is a 
header file - -@Namespace("c10") public static native @Cast("size_t") long findFirstOutArg(@StdVector Argument args); - - - - - - - - - - - - - - - -// covariant subtyping of list of Arguments -@Namespace("c10") public static native @Cast("bool") boolean isSubtypeOfList( - @ByVal ArgumentArrayRef child, - @ByVal ArgumentArrayRef parent, - @Cast("std::ostream*") Pointer why_not); - - - // namespace c10 @@ -16019,8 +16046,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Returns true iff the given op name is on the allowlist // and should be registered -@Namespace("c10::impl") public static native @Cast("const bool") boolean op_allowlist_check(@StringView BytePointer op_name); -@Namespace("c10::impl") public static native @Cast("const bool") boolean op_allowlist_check(@StringView String op_name); + // Returns true iff the given schema string is on the allowlist // and should be registered @@ -16850,7 +16876,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include // #include // #include // #include @@ -16863,8 +16888,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// #include +// #include // #include +// #include // #ifdef C10_MOBILE // #endif @@ -16897,8 +16923,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include // #include +// #include // #include // #include @@ -17282,6 +17308,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Handle case where the kernel returns void. +@Namespace("c10::detail") public static native void _print_dispatch_trace(@StdString BytePointer label, @StdString BytePointer op_name, @Const @ByRef DispatchKeySet dispatchKeySet); +@Namespace("c10::detail") public static native void _print_dispatch_trace(@StdString String label, @StdString String op_name, @Const @ByRef DispatchKeySet dispatchKeySet); + // namespace detail // See [Note: Argument forwarding in the dispatcher] for why Args doesn't use && @@ -17346,14 +17375,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch { core(0), data_dependent_output(1), dynamic_output_shape(2), - generated(3), - inplace_view(4), - needs_fixed_stride_order(5), - nondeterministic_bitwise(6), - nondeterministic_seeded(7), - pointwise(8), - pt2_compliant_tag(9), - view_copy(10); + flexible_layout(3), + generated(4), + inplace_view(5), + needs_fixed_stride_order(6), + nondeterministic_bitwise(7), + nondeterministic_seeded(8), + pointwise(9), + pt2_compliant_tag(10), + view_copy(11); public final int value; private Tag(int v) { this.value = v; } @@ -17443,7 +17473,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// #include +// #include // namespace torch::jit // This enumerator represents the 'kind' of an attribute - a buffer, a parameter, or neither. 
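One behavioral note on the Tag enum changed above: flexible_layout lands at ordinal 3, pushing every tag from generated through view_copy up by one, so integer tag values stored against 2.4.x no longer line up; compare by enum constant instead. A quick check against the regenerated constants (a sketch; assumes Tag stays nested in org.bytedeco.pytorch.global.torch as generated here):

    import org.bytedeco.pytorch.global.torch.Tag;

    public class TagOrdinals {
        public static void main(String[] args) {
            // flexible_layout is new in 2.5.0; later tags shift by one.
            System.out.println("flexible_layout = " + Tag.flexible_layout.value); // 3
            System.out.println("view_copy       = " + Tag.view_copy.value);      // 11 (was 10)
        }
    }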
@@ -17731,7 +17761,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // torch::Library::DEF, // &TORCH_LIBRARY_init_##ns, // #ns, -// c10::nullopt, +// std::nullopt, // __FILE__, // __LINE__); // void TORCH_LIBRARY_init_##ns(torch::Library& m) @@ -17770,7 +17800,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // torch::Library::FRAGMENT, // &C10_CONCATENATE(TORCH_LIBRARY_FRAGMENT_init_##ns##_, uid), // #ns, -// c10::nullopt, +// std::nullopt, // __FILE__, // __LINE__); // void C10_CONCATENATE( @@ -17836,7 +17866,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // ? &C10_CONCATENATE(TORCH_LIBRARY_IMPL_init_##ns##_##k##_, uid) // : [](torch::Library&) -> void {}), // #ns, -// c10::make_optional(c10::DispatchKey::k), +// std::make_optional(c10::DispatchKey::k), // __FILE__, // __LINE__); // void C10_CONCATENATE( @@ -17851,13 +17881,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { /** \private */ // #define MAKE_TORCH_LIBRARY(ns) -// torch::Library(torch::Library::DEF, #ns, c10::nullopt, __FILE__, __LINE__) +// torch::Library(torch::Library::DEF, #ns, std::nullopt, __FILE__, __LINE__) /** \private */ // #define MAKE_TORCH_LIBRARY_IMPL(ns, k) // torch::Library( // torch::Library::IMPL, // #ns, -// c10::make_optional(c10::DispatchKey::k), +// std::make_optional(c10::DispatchKey::k), // __FILE__, // __LINE__) @@ -17982,6 +18012,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at::impl") public static native @Cast("bool") boolean torch_function_mode_enabled(); +@Namespace("at::impl") public static native @Cast("bool") boolean torch_function_all_disabled(); + // namespace at::impl @@ -17989,9 +18021,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once +// #include // #include -// #include // #include +// #include // #include // #include @@ -18194,8 +18227,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// #include // #include +// #include // namespace torch::autograd @@ -18212,9 +18245,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // all threads into a single queue, then process together at the end // in the main thread. 
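Among the additions in this stretch, at::impl::torch_function_all_disabled() complements the existing torch_function_mode_enabled() query. Both are plain static natives on the torch class, so checking the __torch_function__ state from Java is a one-liner each; a sketch grounded only in the declarations above:

    import org.bytedeco.pytorch.global.torch;

    public class TorchFunctionState {
        public static void main(String[] args) {
            // New in 2.5.0: distinguish "a mode is active" from "handling is fully off".
            System.out.println("mode enabled: " + torch.torch_function_mode_enabled());
            System.out.println("all disabled: " + torch.torch_function_all_disabled());
        }
    }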
- // namespace utils - // namespace autograd - // namespace torch + // namespace torch::autograd::utils // Parsed from torch/csrc/autograd/graph_task.h @@ -18312,7 +18343,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include // Targeting ../CPUGeneratorImpl.java @@ -18332,6 +18363,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include +// #include // Targeting ../AcceleratorHooksInterface.java @@ -18349,11 +18381,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include +// #include + +// #include // #include // #include - @Namespace("at") @MemberGetter public static native @Cast("const char*") BytePointer MTIA_HELP(); // Targeting ../MTIAHooksInterface.java @@ -18383,9 +18417,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // - It provides a set of common APIs as defined by AcceleratorHooksInterface // // As of today, accelerator devices are (in no particular order): -// CUDA, MTIA, PrivateUse1 -// We want to add once all the proper APIs are supported and tested: -// HIP, MPS, XPU +// CUDA, MTIA, XPU, HIP, MPS, PrivateUse1 // Ensures that only one accelerator is available (at // compile time if possible) and return it. @@ -18393,6 +18425,9 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal DeviceTypeOptional getAccelerator(@Cast("bool") boolean checked/*=false*/); @Namespace("at") public static native @ByVal DeviceTypeOptional getAccelerator(); +@Namespace("at") public static native @Cast("bool") boolean isAccelerator(DeviceType d); +@Namespace("at") public static native @Cast("bool") boolean isAccelerator(@Cast("c10::DeviceType") byte d); + // namespace at @@ -18501,6 +18536,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include +// #include + // #include @@ -18607,8 +18644,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native void RegisterPrivateUse1HooksInterface( PrivateUse1HooksInterface hook_); -@Namespace("at") public static native PrivateUse1HooksInterface GetPrivateUse1HooksInterface(); - @Namespace("at") public static native @Cast("bool") boolean isPrivateUse1HooksRegistered(); @Namespace("at::detail") public static native @Const @ByRef PrivateUse1HooksInterface getPrivateUse1Hooks(); @@ -18624,10 +18659,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// #include // #include -@Namespace("at") @MemberGetter public static native @Cast("const char*") BytePointer XPU_HELP(); +// #include +// #include // Targeting ../XPUHooksInterface.java @@ -18700,8 +18735,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // Reads an environment variable and returns -// - optional, if set equal to "1" -// - optional, if set equal to "0" +// - std::optional, if set equal to "1" +// - std::optional, if set equal to "0" // - nullopt, otherwise // // NB: @@ -18939,7 +18974,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @ByVal LongArrayRef size, ScalarType dtype, @Cast("bool") boolean pin_memory/*=false*/, - @ByVal(nullValue = "std::optional(c10::nullopt)") MemoryFormatOptional memory_format_opt); + @ByVal(nullValue = "std::optional(std::nullopt)") MemoryFormatOptional memory_format_opt); @Namespace("at::detail") public static native @ByVal TensorBase empty_cpu( @ByVal LongArrayRef size, 
ScalarType dtype); @@ -18947,7 +18982,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, ScalarType dtype, @Cast("bool") boolean pin_memory/*=false*/, - @ByVal(nullValue = "std::optional(c10::nullopt)") MemoryFormatOptional memory_format_opt); + @ByVal(nullValue = "std::optional(std::nullopt)") MemoryFormatOptional memory_format_opt); @Namespace("at::detail") public static native @ByVal TensorBase empty_cpu( @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, ScalarType dtype); @@ -19016,14 +19051,14 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at::detail") public static native @ByVal TensorBase empty_meta( @ByVal LongArrayRef size, ScalarType dtype, - @ByVal(nullValue = "std::optional(c10::nullopt)") MemoryFormatOptional memory_format_opt); + @ByVal(nullValue = "std::optional(std::nullopt)") MemoryFormatOptional memory_format_opt); @Namespace("at::detail") public static native @ByVal TensorBase empty_meta( @ByVal LongArrayRef size, ScalarType dtype); @Namespace("at::detail") public static native @ByVal TensorBase empty_meta( @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, ScalarType dtype, - @ByVal(nullValue = "std::optional(c10::nullopt)") MemoryFormatOptional memory_format_opt); + @ByVal(nullValue = "std::optional(std::nullopt)") MemoryFormatOptional memory_format_opt); @Namespace("at::detail") public static native @ByVal TensorBase empty_meta( @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] size, ScalarType dtype); @@ -19091,8 +19126,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @ByVal SymIntArrayRef stride, @ByVal ScalarTypeOptional dtype_opt, @ByVal LayoutOptional layout_opt, - @ByVal DeviceOptional device_opt, - @ByVal BoolOptional pin_memory_opt); + @ByVal DeviceOptional device_opt); @Namespace("at::detail") public static native @ByVal TensorBase empty_strided_symint_meta( @ByVal SymIntArrayRef size, @@ -19169,10 +19203,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// #include -// #include -// #include -// #include // #define AT_DISALLOW_COPY_AND_ASSIGN(TypeName) // TypeName(const TypeName&) = delete; @@ -19652,7 +19682,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -19690,7 +19720,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -19725,7 +19755,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -19763,7 +19793,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -19801,7 +19831,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -19832,7 +19862,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -19886,7 +19916,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -19940,7 +19970,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // 
#include // #include -// #include +// #include @@ -19972,7 +20002,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20003,7 +20033,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20041,7 +20071,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20076,7 +20106,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20114,7 +20144,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20149,7 +20179,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20196,7 +20226,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20233,7 +20263,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20270,7 +20300,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20307,7 +20337,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20344,7 +20374,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20385,7 +20415,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20422,7 +20452,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20452,7 +20482,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20506,7 +20536,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20542,7 +20572,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20572,7 +20602,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20607,7 +20637,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20635,7 +20665,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20666,7 +20696,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20694,7 +20724,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20764,7 +20794,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20795,7 +20825,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20828,7 +20858,7 @@ public class torch extends 
org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20868,7 +20898,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20908,7 +20938,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20945,7 +20975,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -20978,7 +21008,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -21013,7 +21043,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -21083,7 +21113,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -21138,7 +21168,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -21176,7 +21206,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -21214,7 +21244,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -21252,7 +21282,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -21290,7 +21320,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -21328,7 +21358,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -21363,7 +21393,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -21401,7 +21431,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -21438,7 +21468,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -21475,7 +21505,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -21490,16 +21520,16 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("at") public static native @ByVal Tensor argsort(@Const @ByRef Tensor self, @Cast("bool") boolean stable, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean descending/*=false*/); @Namespace("at") public static native @ByVal Tensor argsort(@Const @ByRef Tensor self, @Cast("bool") boolean stable); -// aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor -@Namespace("at") public static native @ByVal Tensor argsort(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean descending/*=false*/); -@Namespace("at") public static native @ByVal Tensor argsort(@Const @ByRef Tensor self, @ByVal Dimname dim); - // aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!) 
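The argsort block here was only reordered by the generator (the Dimname overload now trails the stable_out pair, as seen just below); existing call sites are unaffected because overload resolution is by signature. For reference, a minimal sketch of the stable variant, assuming the long... overload of torch.randn that the presets generate for ArrayRef<int64_t> sizes:

    import org.bytedeco.pytorch.Tensor;
    import org.bytedeco.pytorch.global.torch;

    public class ArgsortExample {
        public static void main(String[] args) {
            Tensor t = torch.randn(3, 4);
            // aten::argsort.stable: stable sort indices along the last dimension.
            Tensor idx = torch.argsort(t, /*stable=*/true, /*dim=*/-1, /*descending=*/false);
            System.out.println("result dims: " + idx.dim()); // same shape as t
        }
    }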
@Namespace("at") public static native @ByRef Tensor argsort_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("bool") boolean stable, @Cast("int64_t") long dim/*=-1*/, @Cast("bool") boolean descending/*=false*/); @Namespace("at") public static native @ByRef Tensor argsort_out(@ByRef Tensor out, @Const @ByRef Tensor self, @Cast("bool") boolean stable); // aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!) @Namespace("at") public static native @ByRef Tensor argsort_outf(@Const @ByRef Tensor self, @Cast("bool") boolean stable, @Cast("int64_t") long dim, @Cast("bool") boolean descending, @ByRef Tensor out); +// aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor +@Namespace("at") public static native @ByVal Tensor argsort(@Const @ByRef Tensor self, @ByVal Dimname dim, @Cast("bool") boolean descending/*=false*/); +@Namespace("at") public static native @ByVal Tensor argsort(@Const @ByRef Tensor self, @ByVal Dimname dim); + @@ -21520,7 +21550,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -21550,7 +21580,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -21601,7 +21631,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -21661,7 +21691,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -21721,7 +21751,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -21759,7 +21789,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -21797,7 +21827,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -21835,7 +21865,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -21870,7 +21900,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -21908,7 +21938,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -21942,7 +21972,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -21976,7 +22006,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -22010,7 +22040,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -22043,7 +22073,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -22085,7 +22115,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -22123,7 +22153,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -22165,7 +22195,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // 
#include -// #include +// #include @@ -22203,7 +22233,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -22240,7 +22270,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -22289,7 +22319,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -22319,7 +22349,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -22349,7 +22379,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -22384,7 +22414,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -22419,7 +22449,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -22454,7 +22484,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -22489,7 +22519,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -22524,7 +22554,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -22559,7 +22589,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -22594,7 +22624,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -22648,7 +22678,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -22679,7 +22709,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -22716,7 +22746,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -22753,7 +22783,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -22790,7 +22820,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -22827,7 +22857,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -22864,7 +22894,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -22915,7 +22945,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -22966,7 +22996,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23001,7 +23031,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23052,7 +23082,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23103,7 +23133,7 @@ public class torch extends 
org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23154,7 +23184,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23203,7 +23233,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23241,7 +23271,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23276,7 +23306,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23307,7 +23337,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23343,7 +23373,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23390,7 +23420,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23420,7 +23450,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23451,7 +23481,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23504,7 +23534,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23541,7 +23571,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23569,7 +23599,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23604,7 +23634,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23635,7 +23665,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23673,7 +23703,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23714,7 +23744,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23752,7 +23782,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23780,7 +23810,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23831,7 +23861,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23868,7 +23898,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23905,7 +23935,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23942,7 +23972,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -23972,7 +24002,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ 
-24003,7 +24033,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -24058,7 +24088,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -24107,7 +24137,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -24156,7 +24186,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -24211,7 +24241,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -24248,7 +24278,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -24276,7 +24306,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -24333,7 +24363,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -24361,7 +24391,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -24396,7 +24426,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -24434,7 +24464,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -24465,7 +24495,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -24500,7 +24530,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -24553,7 +24583,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -24606,7 +24636,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -24636,7 +24666,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -24674,7 +24704,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -24734,7 +24764,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -24762,7 +24792,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -24818,7 +24848,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -24874,7 +24904,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -24930,7 +24960,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -24984,7 +25014,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -25021,7 +25051,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // 
#include // #include -// #include +// #include @@ -25051,7 +25081,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -25089,7 +25119,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -25127,7 +25157,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -25165,7 +25195,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -25219,7 +25249,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -25279,7 +25309,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -25333,7 +25363,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -25387,7 +25417,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -25424,7 +25454,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -25465,7 +25495,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -25508,7 +25538,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -25538,7 +25568,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -25576,7 +25606,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -25614,7 +25644,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -25645,7 +25675,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -25676,7 +25706,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -25724,7 +25754,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -25755,7 +25785,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -25792,7 +25822,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -25829,7 +25859,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -25857,7 +25887,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -25892,7 +25922,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -25929,7 +25959,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -25964,7 +25994,7 @@ public class torch extends 
org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -25999,7 +26029,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -26034,7 +26064,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -26069,7 +26099,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -26123,7 +26153,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -26177,7 +26207,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -26231,7 +26261,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -26285,7 +26315,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -26320,7 +26350,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -26355,7 +26385,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -26385,7 +26415,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -26428,7 +26458,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -26458,7 +26488,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -26501,7 +26531,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -26548,7 +26578,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -26578,7 +26608,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -26625,7 +26655,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -26660,7 +26690,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -26688,7 +26718,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -26726,7 +26756,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -26754,7 +26784,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -26800,7 +26830,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -26830,7 +26860,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -26863,7 +26893,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ 
[Hunks -26898,7 through -31200,7: a long run of changes in which each hunk swaps one "// #include <...>" comment line in the generated torch.java for another; the header names inside the angle brackets were lost in extraction, leaving only bare "// #include" markers, so these hunks are not reproduced.]
@@ -31246,7 +31276,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch {
     @ByVal LongArrayRef strides,
     PointerConsumer deleter,
     @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options,
-    @Const @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional target_device);
+    @Const @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional target_device);
 @Namespace("at") public static native @ByVal Tensor from_blob(
     Pointer data,
     @ByVal LongArrayRef sizes,
@@ -31258,7 +31288,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch {
     @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] strides,
     PointerConsumer deleter,
     @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options,
-    @Const @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional target_device);
+    @Const @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional target_device);
 @Namespace("at") public static native @ByVal Tensor from_blob(
     Pointer data,
     @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes,
@@ -31272,7 +31302,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch {
     @Cast("int64_t") long storage_offset,
     PointerConsumer deleter,
     @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options,
-    @Const @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional target_device);
+    @Const @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional target_device);
 @Namespace("at") public static native @ByVal Tensor from_blob(
     Pointer data,
     @ByVal LongArrayRef sizes,
@@ -31286,7 +31316,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch {
     @Cast("int64_t") long storage_offset,
     PointerConsumer deleter,
     @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options,
-    @Const @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional target_device);
+    @Const @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional target_device);
 @Namespace("at") public static native @ByVal Tensor from_blob(
     Pointer data,
     @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes,
@@ -31299,7 +31329,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch {
     @ByVal LongArrayRef sizes,
     PointerConsumer deleter,
     @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options,
-    @Const @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional target_device);
+    @Const @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional target_device);
 @Namespace("at") public static native @ByVal Tensor from_blob(
     Pointer data,
     @ByVal LongArrayRef sizes,
@@ -31309,7 +31339,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch {
     @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes,
     PointerConsumer deleter,
     @Const @ByRef(nullValue = "c10::TensorOptions{}") TensorOptions options,
-    @Const @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional target_device);
+    @Const @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional target_device);
 @Namespace("at") public static native @ByVal Tensor from_blob(
     Pointer data,
     @ByVal @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] sizes,
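For context, these `from_blob` bindings wrap a caller-owned native buffer as a tensor without copying; only the default value of `target_device` changes here (`c10::nullopt` to `std::nullopt`). A minimal sketch of calling them through the presets, assuming the no-deleter overload that takes just a data pointer and sizes, and that the default `TensorOptions` dtype is float (the `FloatPointer` buffer is illustrative, not part of this patch):

```java
import org.bytedeco.javacpp.FloatPointer;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class FromBlobSketch {
    public static void main(String[] args) {
        // Caller-owned native buffer; from_blob wraps it without copying,
        // so the buffer must outlive the tensor (no deleter is passed here).
        FloatPointer data = new FloatPointer(1f, 2f, 3f, 4f, 5f, 6f);
        Tensor t = from_blob(data, new long[] {2, 3});
        System.out.println("sum = " + t.sum().item_float()); // expected 21.0
    }
}
```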
[Hunks -31369,7 through -39395,7: further changes that only rewrite "// #include <...>" comment lines in torch.java; elided for the same reason as above.]
@@ -39406,6 +39436,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch {
 @Namespace("at") public static native @ByVal Tensor mean(@Const @ByRef Tensor self, @ByVal(nullValue = "std::optional(::std::nullopt)") ScalarTypeOptional dtype);
 @Namespace("at") public static native @ByVal Tensor mean(@Const @ByRef Tensor self);
 
+// aten::mean.dtype_out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor mean_out(@ByRef Tensor out, @Const @ByRef Tensor self, @ByVal(nullValue = "std::optional(::std::nullopt)") ScalarTypeOptional dtype);
+@Namespace("at") public static native @ByRef Tensor mean_out(@ByRef Tensor out, @Const @ByRef Tensor self);
+// aten::mean.dtype_out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)
+@Namespace("at") public static native @ByRef Tensor mean_outf(@Const @ByRef Tensor self, @ByVal ScalarTypeOptional dtype, @ByRef Tensor out);
+
 // aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor
 @Namespace("at") public static native @ByVal Tensor mean(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim, @Cast("bool") boolean keepdim/*=false*/, @ByVal(nullValue = "std::optional(::std::nullopt)") ScalarTypeOptional dtype);
 @Namespace("at") public static native @ByVal Tensor mean(@Const @ByRef Tensor self, @ByVal LongArrayRefOptional dim);
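The new `mean_out`/`mean_outf` bindings expose the `aten::mean.dtype_out` overload, which writes the reduction into a caller-provided `out` tensor instead of allocating a new one. A minimal sketch, assuming `ones` and `empty` overloads that take a `long[]` of sizes and that the out-variant accepts a 0-dim result tensor (both assumptions, not shown in this hunk):

```java
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class MeanOutSketch {
    public static void main(String[] args) {
        Tensor self = ones(new long[] {2, 3});
        // 0-dim destination tensor; mean over all elements is a scalar.
        Tensor out = empty(new long[] {});
        mean_out(out, self);                  // aten::mean.dtype_out, writes into out
        System.out.println(out.item_float()); // expected 1.0
    }
}
```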
[Hunks -39456,7 through -43783,7: further include-comment-only changes in torch.java, elided as above.]
-42917,7 +42953,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -42968,7 +43004,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -43005,7 +43041,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -43040,7 +43076,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -43070,7 +43106,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -43107,7 +43143,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -43204,7 +43240,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -43235,7 +43271,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -43340,7 +43376,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -43383,7 +43419,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -43435,7 +43471,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -43463,7 +43499,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -43494,7 +43530,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -43576,7 +43612,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -43615,7 +43651,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -43648,7 +43684,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -43683,7 +43719,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -43720,7 +43756,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -43755,7 +43791,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -43783,7 +43819,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -43825,18 +43861,20 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include // #include -// aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0) -> Tensor -@Namespace("at") public static native @ByVal Tensor pad_sequence(@ByVal TensorArrayRef sequences, @Cast("bool") boolean batch_first/*=false*/, double padding_value/*=0.0*/); +// aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0, str 
padding_side="right") -> Tensor +@Namespace("at") public static native @ByVal Tensor pad_sequence(@ByVal TensorArrayRef sequences, @Cast("bool") boolean batch_first/*=false*/, double padding_value/*=0.0*/, @StringView BytePointer padding_side/*="right"*/); @Namespace("at") public static native @ByVal Tensor pad_sequence(@ByVal TensorArrayRef sequences); -@Namespace("at") public static native @ByVal Tensor pad_sequence(@ByVal TensorVector sequences, @Cast("bool") boolean batch_first/*=false*/, double padding_value/*=0.0*/); +@Namespace("at") public static native @ByVal Tensor pad_sequence(@ByVal TensorVector sequences, @Cast("bool") boolean batch_first/*=false*/, double padding_value/*=0.0*/, @StringView String padding_side/*="right"*/); @Namespace("at") public static native @ByVal Tensor pad_sequence(@ByVal TensorVector sequences); +@Namespace("at") public static native @ByVal Tensor pad_sequence(@ByVal TensorVector sequences, @Cast("bool") boolean batch_first/*=false*/, double padding_value/*=0.0*/, @StringView BytePointer padding_side/*="right"*/); +@Namespace("at") public static native @ByVal Tensor pad_sequence(@ByVal TensorArrayRef sequences, @Cast("bool") boolean batch_first/*=false*/, double padding_value/*=0.0*/, @StringView String padding_side/*="right"*/); @@ -43858,7 +43896,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -43889,7 +43927,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -43920,7 +43958,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -43951,7 +43989,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -43989,7 +44027,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -44017,7 +44055,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -44048,7 +44086,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -44083,7 +44121,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -44118,7 +44156,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -44155,7 +44193,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -44185,7 +44223,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -44220,7 +44258,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -44255,7 +44293,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -44285,7 +44323,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -44336,7 +44374,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include @@ -44366,7 
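[Note, not part of the patch: a hedged sketch of the padding_side argument that PyTorch 2.5.0 adds to aten::pad_sequence, using the new TensorVector overload declared above. It assumes the usual org.bytedeco.pytorch dependency and the varargs ones(long...) factory from the presets.]

import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class PadSequenceExample {
    public static void main(String[] args) {
        // Two 1-D sequences of different lengths to batch together.
        Tensor a = ones(3);
        Tensor b = ones(5);
        TensorVector sequences = new TensorVector(a, b);

        // New in 2.5.0: padding_side="left" pads at the front of each
        // sequence instead of the default "right".
        Tensor batch = pad_sequence(sequences, /*batch_first=*/true,
                /*padding_value=*/0.0, /*padding_side=*/"left");
        System.out.println(batch.size(0) + " x " + batch.size(1)); // 2 x 5
    }
}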
[More "// #include"-only hunks omitted; include targets lost in extraction.]

@@ -48327,15 +48365,15 @@ public class torch extends org.bytedeco.pytorch.presets.torch {
 // #include
 // #include
 // #include
-// #include
+// #include
 // #include

-// aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None) -> Tensor
-@Namespace("at") public static native @ByVal Tensor scaled_dot_product_attention(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Const @ByRef(nullValue = "std::optional{}") TensorOptional attn_mask, double dropout_p/*=0.0*/, @Cast("bool") boolean is_causal/*=false*/, @ByVal(nullValue = "std::optional(::std::nullopt)") DoubleOptional scale);
+// aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None, bool enable_gqa=False) -> Tensor
+@Namespace("at") public static native @ByVal Tensor scaled_dot_product_attention(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Const @ByRef(nullValue = "std::optional{}") TensorOptional attn_mask, double dropout_p/*=0.0*/, @Cast("bool") boolean is_causal/*=false*/, @ByVal(nullValue = "std::optional(::std::nullopt)") DoubleOptional scale, @Cast("bool") boolean enable_gqa/*=false*/);
 @Namespace("at") public static native @ByVal Tensor scaled_dot_product_attention(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value);
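[Note, not part of the patch: a hedged sketch of the new enable_gqa flag, which lets the query head count differ from the key/value head count (grouped-query attention). It assumes the varargs randn(long...) factory; the null arguments rely on the nullValue defaults declared above, the usual JavaCPP idiom.]

import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class SdpaGqaExample {
    public static void main(String[] args) {
        // 8 query heads sharing 2 key/value heads: [batch, heads, seq, head_dim]
        Tensor query = randn(2, 8, 16, 64);
        Tensor key   = randn(2, 2, 16, 64);
        Tensor value = randn(2, 2, 16, 64);

        // Without enable_gqa=true, mismatched head counts are rejected.
        Tensor out = scaled_dot_product_attention(query, key, value,
                /*attn_mask=*/null, /*dropout_p=*/0.0, /*is_causal=*/false,
                /*scale=*/null, /*enable_gqa=*/true);
        System.out.println(out.size(1)); // 8: the query head count is preserved
    }
}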
[A long run of hunks that only add, remove, or rename generated "// #include" comment lines is omitted; include targets lost in extraction.]
@@ -59976,7 +60024,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch {
 //===----------------------------------------------------------------------===//
 // Utilities
 //===----------------------------------------------------------------------===//
- // namespace detail

 // namespace torch

@@ -60119,6 +60166,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch {
 // Targeting ../TensorArgs.java


+// Targeting ../LiftedIValueArg.java
+
+
+// Targeting ../LiftedIValueArgs.java
+
+
 // Targeting ../AutogradCompilerCall.java

@@ -60295,7 +60348,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch {
  * The method should_include_kernel_dtype() returns true/false
  * based on whether the switching code for a specific dtype should be
  * included based on build time constants generated from tracing model
- * execution. This method will be implmeneted via code-generation and
+ * execution. This method will be implemented via code-generation and
  * included in this file when code-gen is ready.
  */
 @Namespace("at") public static native @Cast("const bool") boolean should_include_kernel_dtype(

[More "// #include"-only hunks omitted; include targets lost in extraction.]

@@ -62941,8 +63005,6 @@ public class torch extends org.bytedeco.pytorch.presets.torch {
   public TensorIndexType intern() { for (TensorIndexType e : values()) if (e.value == value) return e; return this; }
   @Override public String toString() { return intern().name(); }
 }
-
-@Namespace("at::indexing") @MemberGetter public static native @ByRef @Cast("const c10::nullopt_t*") Pointer None();

 // Targeting ../EllipsisIndexType.java

@@ -63227,13 +63289,13 @@ public class torch extends org.bytedeco.pytorch.presets.torch {
 //   _(!=, x.ne(y), y.ne(x))

 // #define DEFINE_OPERATOR(op, body, reverse_scalar_body)
-// static inline Tensor operator op(const Tensor& x, const Tensor& y) {
+// inline Tensor operator op(const Tensor& x, const Tensor& y) {
 //   return body;
 // }
-// static inline Tensor operator op(const Tensor& x, const Scalar& y) {
+// inline Tensor operator op(const Tensor& x, const Scalar& y) {
 //   return body;
 // }
-// static inline Tensor operator op(const Scalar& x, const Tensor& y) {
+// inline Tensor operator op(const Scalar& x, const Tensor& y) {
 //   return reverse_scalar_body;
 // }

 @Namespace("at") public static native @ByVal @Name("operator +") Tensor add(@Const @ByRef Scalar x, @Const @ByRef Tensor y);

@@ -63943,8 +64005,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch {
 @Namespace("torch::jit") public static native @ByVal OperatorName parseName(@StdString BytePointer name);
 @Namespace("torch::jit") public static native @ByVal OperatorName parseName(@StdString String name);

- // namespace jit
- // namespace torch
+ // namespace torch::jit
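[Note, not part of the patch: the DEFINE_OPERATOR hunk above only drops "static" from the inline C++ operators; on the Java side those overloads remain exposed as named static methods, e.g. "operator +" as add. A hedged usage sketch, assuming the varargs ones(long...) factory and the Scalar(double) constructor:]

import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class OperatorExample {
    public static void main(String[] args) {
        Tensor x = ones(1);
        // Scalar + Tensor resolves to the operator+(const Scalar&, const Tensor&)
        // overload declared above as add(Scalar, Tensor).
        Tensor y = add(new Scalar(3.0), x);
        System.out.println(y.item().toDouble()); // 4.0
    }
}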
#include // #include // #include @@ -64648,8 +64709,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Targeting ../GraphsAttr.java - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/jit/ir/graph_node_list.h @@ -64702,8 +64762,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { - // namespace jit - // namespace torch + // namespace torch::jit // namespace std @@ -64712,14 +64771,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once // #include -// #include +// #include // #include // #include // #include -// #include // #include -// #include // #include // #include // Targeting ../SourceRangeUnpickler.java @@ -64762,10 +64819,10 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #pragma once // #include // #include -// #include // #include // #include // #include +// #include // #include @Namespace("torch::jit") @MemberGetter public static native @Cast("const size_t") long kModuleInstanceInfo(); @@ -64823,8 +64880,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("torch::jit") @MemberGetter public static native @Cast("const size_t") long kDebugInfoTupleSourceRangeIndex(); @Namespace("torch::jit") @MemberGetter public static native @Cast("const size_t") long kDebugInfoTupleNodeNameIndex(); @Namespace("torch::jit") @MemberGetter public static native @Cast("const size_t") long kDebugInfoTupleInlinedCSIndex(); - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/jit/ir/constants.h @@ -64846,8 +64902,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { @Namespace("torch::jit") public static native Value insertConstant( @ByRef Graph g, @Const @ByRef IValue val, - @ByVal(nullValue = "std::optional(c10::nullopt)") SourceRangeOptional loc, - @ByVal(nullValue = "std::optional(c10::nullopt)") @Cast("std::optional*") ScopeOptional scope); + @ByVal(nullValue = "std::optional(std::nullopt)") SourceRangeOptional loc, + @ByVal(nullValue = "std::optional(std::nullopt)") @Cast("std::optional*") ScopeOptional scope); @Namespace("torch::jit") public static native Value insertConstant( @ByRef Graph g, @Const @ByRef IValue val); @@ -64857,12 +64913,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // closely related to the implementation of prim::Constant that is also in // constants.cpp. // -// returns a c10::nullopt if the IValue kind cannot be inserted as a constant +// returns a std::nullopt if the IValue kind cannot be inserted as a constant @Namespace("torch::jit") public static native @ByVal ValueOptional tryInsertConstant( @ByRef Graph g, @Const @ByRef IValue val, - @ByVal(nullValue = "std::optional(c10::nullopt)") SourceRangeOptional loc, - @ByVal(nullValue = "std::optional(c10::nullopt)") @Cast("std::optional*") ScopeOptional scope); + @ByVal(nullValue = "std::optional(std::nullopt)") SourceRangeOptional loc, + @ByVal(nullValue = "std::optional(std::nullopt)") @Cast("std::optional*") ScopeOptional scope); @Namespace("torch::jit") public static native @ByVal ValueOptional tryInsertConstant( @ByRef Graph g, @Const @ByRef IValue val); @@ -64872,13 +64928,12 @@ public class torch extends org.bytedeco.pytorch.presets.torch { //////////////////////////////////////////////////////////////////////////////// // attempt to convert a (possibly constant) Value* into an interpreter value -// (IValue). returns c10::nullopt if the Value* was not constant +// (IValue). 
returns std::nullopt if the Value* was not constant @Namespace("torch::jit") public static native @ByVal IValueOptional toIValue(@Const Value v); // if a value is a constant then try to turn into type T using the // same rules as the interpreter - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/jit/ir/named_value.h @@ -64892,8 +64947,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/jit/runtime/operator_options.h @@ -65032,7 +65086,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include // #include // #include @@ -65215,8 +65269,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/jit/python/update_graph_executor_opt.h @@ -65284,8 +65337,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // Parsed from torch/csrc/jit/runtime/interpreter.h // #pragma once -// #include // #include +// #include // #include // #include @@ -65469,8 +65522,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include -// #include // #include +// #include // #include @@ -65595,8 +65648,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/jit/api/compilation_unit.h @@ -65615,7 +65667,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include +// #include // #include // #include @@ -65659,8 +65711,8 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // #include // #include // #include -// #include // #include +// #include // #include // #include @@ -65694,7 +65746,7 @@ public class torch extends org.bytedeco.pytorch.presets.torch { // details. 
@Namespace("torch::jit") public static native @ByVal JitModule freeze( @Const @ByRef JitModule module, - @Const @ByRef(nullValue = "std::optional >(c10::nullopt)") StringVectorOptional preserved_attrs, + @Const @ByRef(nullValue = "std::optional >(std::nullopt)") StringVectorOptional preserved_attrs, @Cast("bool") boolean optimize_numerics/*=true*/); @Namespace("torch::jit") public static native @ByVal JitModule freeze( @Const @ByRef JitModule module); @@ -65835,10 +65887,10 @@ The list of (type, depth) pairs controls the type of specializations and the num // #pragma once // #include -// #include // #include // #include // #include +// #include // #include // #include @@ -66371,7 +66423,7 @@ The list of (type, depth) pairs controls the type of specializations and the num // } // #define TORCH_ENUM_PRETTY_PRINT(name) -// std::string operator()(const enumtype::k##name& v) const { +// std::string operator()(const enumtype::k##name& v [[maybe_unused]]) const { // std::string k("k"); // return k + #name; // } @@ -66581,9 +66633,9 @@ The list of (type, depth) pairs controls the type of specializations and the num /// @Namespace("torch::fft") public static native @ByVal Tensor fft( @Const @ByRef Tensor self, - @ByVal(nullValue = "std::optional(c10::nullopt)") SymIntOptional n, + @ByVal(nullValue = "std::optional(std::nullopt)") SymIntOptional n, @Cast("int64_t") long dim/*=-1*/, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor fft( @Const @ByRef Tensor self); @@ -66599,9 +66651,9 @@ The list of (type, depth) pairs controls the type of specializations and the num /// @Namespace("torch::fft") public static native @ByVal Tensor ifft( @Const @ByRef Tensor self, - @ByVal(nullValue = "std::optional(c10::nullopt)") SymIntOptional n, + @ByVal(nullValue = "std::optional(std::nullopt)") SymIntOptional n, @Cast("int64_t") long dim/*=-1*/, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor ifft( @Const @ByRef Tensor self); @@ -66617,26 +66669,26 @@ The list of (type, depth) pairs controls the type of specializations and the num /// @Namespace("torch::fft") public static native @ByVal Tensor fft2( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor fft2( @Const @ByRef Tensor self); @Namespace("torch::fft") public static native @ByVal Tensor fft2( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + 
@ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor fft2( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor fft2( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); /** Computes the inverse of torch.fft.fft2 * See https://pytorch.org/docs/main/fft.html#torch.fft.ifft2. @@ -66650,26 +66702,26 @@ The list of (type, depth) pairs controls the type of specializations and the num /// @Namespace("torch::fft") public static native @ByVal Tensor ifft2( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor ifft2( @Const @ByRef Tensor self); @Namespace("torch::fft") public static native @ByVal Tensor ifft2( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor ifft2( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor ifft2( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") 
@Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); /** Computes the N dimensional fast Fourier transform over given dimensions. * See https://pytorch.org/docs/main/fft.html#torch.fft.fftn. @@ -66683,16 +66735,16 @@ The list of (type, depth) pairs controls the type of specializations and the num /// @Namespace("torch::fft") public static native @ByVal Tensor fftn( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional dim, + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor fftn( @Const @ByRef Tensor self); @Namespace("torch::fft") public static native @ByVal Tensor fftn( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); /** Computes the N dimensional fast Fourier transform over given dimensions. * See https://pytorch.org/docs/main/fft.html#torch.fft.ifftn. 
@@ -66706,16 +66758,16 @@ The list of (type, depth) pairs controls the type of specializations and the num /// @Namespace("torch::fft") public static native @ByVal Tensor ifftn( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional dim, + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor ifftn( @Const @ByRef Tensor self); @Namespace("torch::fft") public static native @ByVal Tensor ifftn( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); /** Computes the 1 dimensional FFT of real input with onesided Hermitian output. * See https://pytorch.org/docs/main/fft.html#torch.fft.rfft. @@ -66731,9 +66783,9 @@ The list of (type, depth) pairs controls the type of specializations and the num /// @Namespace("torch::fft") public static native @ByVal Tensor rfft( @Const @ByRef Tensor self, - @ByVal(nullValue = "std::optional(c10::nullopt)") SymIntOptional n, + @ByVal(nullValue = "std::optional(std::nullopt)") SymIntOptional n, @Cast("int64_t") long dim/*=-1*/, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor rfft( @Const @ByRef Tensor self); @@ -66752,9 +66804,9 @@ The list of (type, depth) pairs controls the type of specializations and the num /// @Namespace("torch::fft") public static native @ByVal Tensor irfft( @Const @ByRef Tensor self, - @ByVal(nullValue = "std::optional(c10::nullopt)") SymIntOptional n, + @ByVal(nullValue = "std::optional(std::nullopt)") SymIntOptional n, @Cast("int64_t") long dim/*=-1*/, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor irfft( @Const @ByRef Tensor self); @@ -66770,26 +66822,26 @@ The list of (type, depth) pairs controls the type of specializations and the num /// @Namespace("torch::fft") public static native @ByVal Tensor rfft2( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = 
"std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor rfft2( @Const @ByRef Tensor self); @Namespace("torch::fft") public static native @ByVal Tensor rfft2( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor rfft2( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor rfft2( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); /** Computes the inverse of torch.fft.rfft2. * See https://pytorch.org/docs/main/fft.html#torch.fft.irfft2. 
@@ -66803,26 +66855,26 @@ The list of (type, depth) pairs controls the type of specializations and the num /// @Namespace("torch::fft") public static native @ByVal Tensor irfft2( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor irfft2( @Const @ByRef Tensor self); @Namespace("torch::fft") public static native @ByVal Tensor irfft2( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor irfft2( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor irfft2( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); /** Computes the N dimensional FFT of real input with onesided Hermitian output. 
* See https://pytorch.org/docs/main/fft.html#torch.fft.rfftn @@ -66836,16 +66888,16 @@ The list of (type, depth) pairs controls the type of specializations and the num /// @Namespace("torch::fft") public static native @ByVal Tensor rfftn( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional dim, + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor rfftn( @Const @ByRef Tensor self); @Namespace("torch::fft") public static native @ByVal Tensor rfftn( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); /** Computes the inverse of torch.fft.rfftn. * See https://pytorch.org/docs/main/fft.html#torch.fft.irfftn. @@ -66860,16 +66912,16 @@ The list of (type, depth) pairs controls the type of specializations and the num /// @Namespace("torch::fft") public static native @ByVal Tensor irfftn( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional dim, + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor irfftn( @Const @ByRef Tensor self); @Namespace("torch::fft") public static native @ByVal Tensor irfftn( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] dim, + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); /** Computes the 1 dimensional FFT of a onesided Hermitian signal * @@ -66888,9 +66940,9 @@ The list of (type, depth) pairs controls the type of specializations and the num /// 
@Namespace("torch::fft") public static native @ByVal Tensor hfft( @Const @ByRef Tensor self, - @ByVal(nullValue = "std::optional(c10::nullopt)") SymIntOptional n, + @ByVal(nullValue = "std::optional(std::nullopt)") SymIntOptional n, @Cast("int64_t") long dim/*=-1*/, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor hfft( @Const @ByRef Tensor self); @@ -66910,9 +66962,9 @@ The list of (type, depth) pairs controls the type of specializations and the num /// @Namespace("torch::fft") public static native @ByVal Tensor ihfft( @Const @ByRef Tensor self, - @ByVal(nullValue = "std::optional(c10::nullopt)") SymIntOptional n, + @ByVal(nullValue = "std::optional(std::nullopt)") SymIntOptional n, @Cast("int64_t") long dim/*=-1*/, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor ihfft( @Const @ByRef Tensor self); @@ -66932,26 +66984,26 @@ The list of (type, depth) pairs controls the type of specializations and the num /// @Namespace("torch::fft") public static native @ByVal Tensor hfft2( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor hfft2( @Const @ByRef Tensor self); @Namespace("torch::fft") public static native @ByVal Tensor hfft2( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor hfft2( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor hfft2( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") 
StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); /** Computes the 2-dimensional IFFT of a real input signal. * @@ -66970,26 +67022,26 @@ The list of (type, depth) pairs controls the type of specializations and the num /// @Namespace("torch::fft") public static native @ByVal Tensor ihfft2( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor ihfft2( @Const @ByRef Tensor self); @Namespace("torch::fft") public static native @ByVal Tensor ihfft2( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor ihfft2( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor ihfft2( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); /** Computes the N-dimensional FFT of a Hermitian symmetric input signal. 
* @@ -67007,26 +67059,26 @@ The list of (type, depth) pairs controls the type of specializations and the num /// @Namespace("torch::fft") public static native @ByVal Tensor hfftn( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor hfftn( @Const @ByRef Tensor self); @Namespace("torch::fft") public static native @ByVal Tensor hfftn( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor hfftn( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor hfftn( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); /** Computes the N-dimensional IFFT of a real input signal. 
* @@ -67045,26 +67097,26 @@ The list of (type, depth) pairs controls the type of specializations and the num /// @Namespace("torch::fft") public static native @ByVal Tensor ihfftn( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor ihfftn( @Const @ByRef Tensor self); @Namespace("torch::fft") public static native @ByVal Tensor ihfftn( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor ihfftn( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector("int64_t") long[] dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); @Namespace("torch::fft") public static native @ByVal Tensor ihfftn( @Const @ByRef Tensor self, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long[] s, @ByVal(nullValue = "torch::IntArrayRef({-2, -1})") LongArrayRef dim, - @ByVal(nullValue = "std::optional(c10::nullopt)") StringViewOptional norm); + @ByVal(nullValue = "std::optional(std::nullopt)") StringViewOptional norm); /** Computes the discrete Fourier Transform sample frequencies for a signal of * size n. @@ -67115,12 +67167,12 @@ The list of (type, depth) pairs controls the type of specializations and the num /// @Namespace("torch::fft") public static native @ByVal Tensor fftshift( @Const @ByRef Tensor x, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim); + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional dim); @Namespace("torch::fft") public static native @ByVal Tensor fftshift( @Const @ByRef Tensor x); @Namespace("torch::fft") public static native @ByVal Tensor fftshift( @Const @ByRef Tensor x, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
dim); /** Inverse of torch.fft.fftshift * @@ -67135,12 +67187,12 @@ The list of (type, depth) pairs controls the type of specializations and the num * } */ @Namespace("torch::fft") public static native @ByVal Tensor ifftshift( @Const @ByRef Tensor x, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional dim); + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional dim); @Namespace("torch::fft") public static native @ByVal Tensor ifftshift( @Const @ByRef Tensor x); @Namespace("torch::fft") public static native @ByVal Tensor ifftshift( @Const @ByRef Tensor x, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... dim); // namespace fft // namespace torch @@ -67896,14 +67948,14 @@ The list of (type, depth) pairs controls the type of specializations and the num /// @Namespace("torch::nested") public static native @ByVal Tensor as_nested_tensor( @ByVal TensorArrayRef list, - @ByVal(nullValue = "std::optional(c10::nullopt)") ScalarTypeOptional dtype, - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @ByVal(nullValue = "std::optional(std::nullopt)") ScalarTypeOptional dtype, + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device); @Namespace("torch::nested") public static native @ByVal Tensor as_nested_tensor( @ByVal TensorArrayRef list); @Namespace("torch::nested") public static native @ByVal Tensor as_nested_tensor( @ByVal TensorVector list, - @ByVal(nullValue = "std::optional(c10::nullopt)") ScalarTypeOptional dtype, - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @ByVal(nullValue = "std::optional(std::nullopt)") ScalarTypeOptional dtype, + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device); @Namespace("torch::nested") public static native @ByVal Tensor as_nested_tensor( @ByVal TensorVector list); @@ -67916,14 +67968,14 @@ The list of (type, depth) pairs controls the type of specializations and the num @Namespace("torch::nested") public static native @ByVal Tensor to_padded_tensor( @Const @ByRef Tensor self, double padding, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") LongArrayRefOptional output_size); + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") LongArrayRefOptional output_size); @Namespace("torch::nested") public static native @ByVal Tensor to_padded_tensor( @Const @ByRef Tensor self, double padding); @Namespace("torch::nested") public static native @ByVal Tensor to_padded_tensor( @Const @ByRef Tensor self, double padding, - @ByVal(nullValue = "at::OptionalIntArrayRef(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size); + @ByVal(nullValue = "at::OptionalIntArrayRef(std::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... 
output_size); // namespace nested // namespace torch @@ -68160,9 +68212,9 @@ The list of (type, depth) pairs controls the type of specializations and the num // #define AT_MKLDNN_ENABLED() 1 // #define AT_MKLDNN_ACL_ENABLED() 0 -// #define AT_MKL_ENABLED() 0 +// #define AT_MKL_ENABLED() 1 // #define AT_MKL_SEQUENTIAL() 0 -// #define AT_POCKETFFT_ENABLED() 1 +// #define AT_POCKETFFT_ENABLED() 0 // #define AT_NNPACK_ENABLED() 1 // #define CAFFE2_STATIC_LINK_CUDA() 0 // #define AT_BUILD_WITH_BLAS() 1 @@ -68170,7 +68222,7 @@ The list of (type, depth) pairs controls the type of specializations and the num public static final int AT_PARALLEL_OPENMP = 1; public static final int AT_PARALLEL_NATIVE = 0; // #define AT_BLAS_F2C() 0 -// #define AT_BLAS_USE_CBLAS_DOT() 1 +// #define AT_BLAS_USE_CBLAS_DOT() 0 // Parsed from ATen/Parallel-inl.h @@ -68334,6 +68386,9 @@ scalar_t sf(scalar_t x, scalar_t y) @Override public String toString() { return intern().name(); } } +@Namespace("torch::profiler::impl") public static native @StdString BytePointer actToString(ActivityType t); +@Namespace("torch::profiler::impl") public static native @StdString String actToString(@Cast("torch::profiler::impl::ActivityType") int t); + @Namespace("torch::profiler::impl") public enum ProfilerState { Disabled(0), CPU(1), // CPU-only profiling @@ -68449,10 +68504,10 @@ scalar_t sf(scalar_t x, scalar_t y) // #include // #include -// #include // #include // #include // #include +// #include // TODO: replace with pytorch/rfcs#43 when it is ready. // #define SOFT_ASSERT(cond, ...) @@ -68527,6 +68582,7 @@ scalar_t sf(scalar_t x, scalar_t y) @Namespace("torch::profiler::impl") public static native @StdString BytePointer strListToStr(@Const @ByRef StringVector types); @Namespace("torch::profiler::impl") public static native @StdString BytePointer inputOpIdsToStr( @Const @ByRef RecordFunctionHandleIntList input_op_ids); +@Namespace("torch::profiler::impl") public static native @StdString BytePointer ivalueToStr(@Const @ByRef IValue val, @Cast("bool") boolean isString); @Namespace("torch::profiler::impl") public static native @StdString BytePointer ivalueListToStr(@Const @ByRef IValueVector list); @Namespace("torch::profiler::impl") public static native @ByVal StringVector inputTypes(@Const @ByRef RecordFunction fn); @@ -68632,6 +68688,32 @@ scalar_t sf(scalar_t x, scalar_t y) @Const @ByRef ProfilerConfig config, @Const @ByRef ActivityTypeSet activities); +@Namespace("torch::autograd::profiler") public static native void toggleCollectionDynamic( + @Cast("const bool") boolean enable, + @Const @ByRef ActivityTypeSet activities); + +/** + * When a C++ thread really has no control over how the profiler was enabled, + * for example, by some unreachable Python code, it can call these functions + * to test/join/unjoin itself into the collection set of a profiler, if any. + * Without calling these functions, the symptom may be "not seeing GPU events + * from some child C++ threads". 
This is an example of how to use them: + * + * using namespace torch::autograd::profiler; + * bool enabled = isProfilerEnabledInMainThread(); + * if (enabled != saved_enabled_state) { + * if (enabled) { + * enableProfilerInChildThread(); + * } else { + * disableProfilerInChildThread(); + * } + * saved_enabled_state = enabled; + * } + */ +@Namespace("torch::autograd::profiler") public static native @Cast("bool") boolean isProfilerEnabledInMainThread(); +@Namespace("torch::autograd::profiler") public static native void enableProfilerInChildThread(); +@Namespace("torch::autograd::profiler") public static native void disableProfilerInChildThread(); + // namespace autograd::profiler // Experimental. @@ -72024,8 +72106,8 @@ scalar_t sf(scalar_t x, scalar_t y) // #pragma once // #include -// #include // #include +// #include // #include @@ -75623,6 +75705,8 @@ scalar_t sf(scalar_t x, scalar_t y) * or in * {@code }T x B x *{@code } otherwise * padding_value (double, optional): value for padded elements. Default: 0. + * padding_side (str, optional): the side to pad the sequences on. Default: + * "right". * * Returns: * Tensor of size {@code }T x B x *{@code } if {@code batch_first} is {@code }false{@code }. @@ -76207,22 +76291,22 @@ scalar_t sf(scalar_t x, scalar_t y) /* How do we decide whether to serialize undefined tensors or - c10::nullopt values into the output archive? + std::nullopt values into the output archive? Answer: we strictly follow the behavior of Python API. To be more specific: For optimizer options: a) For undefined tensor: currently no tensor is used as an options argument in -Python API, so we don't need to worry about it now. b) For c10::nullopt value: -we serialize c10::nullopt values into the output archive, to follow the exact +Python API, so we don't need to worry about it now. b) For std::nullopt value: +we serialize std::nullopt values into the output archive, to follow the exact same behavior as Python API. For optimizer param state: a) For undefined tensor: in param state, undefined tensor in C++ impl is equivalent to missing key in Python impl. Since we don't serialize missing keys in Python API, we skip undefined tensors when serializing the param state. b) -For c10::nullopt value: in param state, c10::nullopt value in C++ impl is +For std::nullopt value: in param state, std::nullopt value in C++ impl is equivalent to missing key in Python impl. Since we don't serialize missing keys -in Python API, we skip c10::nullopt values when serializing the param state. */ +in Python API, we skip std::nullopt values when serializing the param state. */ /** Serializes an {@code Optimizer} into an {@code OutputArchive}. */ @Namespace("torch::optim") public static native @ByRef @Name("operator <<") OutputArchive shiftLeft( @@ -77737,14 +77821,14 @@ scalar_t sf(scalar_t x, scalar_t y) public static final int TORCH_VERSION_MAJOR = 2; /** Indicates the minor version of LibTorch. */ -public static final int TORCH_VERSION_MINOR = 4; +public static final int TORCH_VERSION_MINOR = 5; /** Indicates the patch version of LibTorch. 
*/ public static final String TORCH_VERSION = - "2.4.0"; + "2.5.0"; // Parsed from torch/csrc/api/include/torch/xpu.h @@ -77969,8 +78053,7 @@ scalar_t sf(scalar_t x, scalar_t y) - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/jit/frontend/script_type_parser.h @@ -77983,8 +78066,7 @@ scalar_t sf(scalar_t x, scalar_t y) // Targeting ../ScriptTypeParser.java - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/jit/frontend/resolver.h @@ -78002,16 +78084,14 @@ scalar_t sf(scalar_t x, scalar_t y) @Namespace("torch::jit") public static native @SharedPtr NativeResolver nativeResolver(); - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/jit/frontend/sugared_value.h // #pragma once -// #include -// #include // #include +// #include // #include // #include @@ -78106,22 +78186,19 @@ scalar_t sf(scalar_t x, scalar_t y) - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/jit/frontend/error_report.h // #pragma once -// #include // #include // Targeting ../Call.java - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/jit/frontend/tree.h @@ -78168,8 +78245,7 @@ scalar_t sf(scalar_t x, scalar_t y) @Namespace("torch::jit") public static native @Cast("std::ostream*") @ByRef @Name("operator <<") Pointer shiftLeft(@Cast("std::ostream*") @ByRef Pointer out, @IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree t); - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/jit/frontend/lexer.h @@ -78189,9 +78265,6 @@ scalar_t sf(scalar_t x, scalar_t y) // #include // #include -// #if C10_CLANG_HAS_WARNING("-Wshorten-64-to-32") -// #endif - // single character tokens are just the character itself '+' // multi-character tokens need an entry here // if the third entry is not the empty string, it is used @@ -78417,9 +78490,7 @@ scalar_t sf(scalar_t x, scalar_t y) // Targeting ../Token.java - // namespace jit - // namespace torch - + // namespace torch::jit // Parsed from caffe2/serialize/inline_container.h @@ -78544,13 +78615,12 @@ scalar_t sf(scalar_t x, scalar_t y) // #include // #include - // namespace serialize - // namespace caffe2 + // namespace caffe2::serialize @Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( @SharedPtr("torch::jit::CompilationUnit") @ByVal CompilationUnit cu, @StdString BytePointer filename, - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device, + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device, @Cast("bool") boolean load_debug_files/*=true*/); @Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( @SharedPtr("torch::jit::CompilationUnit") @ByVal CompilationUnit cu, @@ -78558,7 +78628,7 @@ scalar_t sf(scalar_t x, scalar_t y) @Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( @SharedPtr("torch::jit::CompilationUnit") @ByVal CompilationUnit cu, @StdString String filename, - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device, + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device, @Cast("bool") boolean load_debug_files/*=true*/); @Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( @SharedPtr("torch::jit::CompilationUnit") @ByVal CompilationUnit cu, @@ -78567,7 +78637,7 @@ scalar_t sf(scalar_t x, scalar_t y) @Namespace("torch::jit") public 
static native @ByVal JitModule import_ir_module( @SharedPtr("torch::jit::CompilationUnit") @ByVal CompilationUnit cu, @Cast("std::istream*") @ByRef Pointer in, - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device, + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device, @Cast("bool") boolean load_debug_files/*=true*/); @Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( @SharedPtr("torch::jit::CompilationUnit") @ByVal CompilationUnit cu, @@ -78576,7 +78646,7 @@ scalar_t sf(scalar_t x, scalar_t y) @Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( @SharedPtr("torch::jit::CompilationUnit") @ByVal CompilationUnit cu, @UniquePtr ReadAdapterInterface rai, - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device, + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device, @Cast("bool") boolean load_debug_files/*=true*/); @Namespace("torch::jit") public static native @ByVal JitModule import_ir_module( @SharedPtr("torch::jit::CompilationUnit") @ByVal CompilationUnit cu, @@ -78652,7 +78722,7 @@ scalar_t sf(scalar_t x, scalar_t y) * {@code torch::jit::ExportModule} in C++. */ @Namespace("torch::jit") public static native @ByVal JitModule load( @Cast("std::istream*") @ByRef Pointer in, - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device, + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device, @Cast("bool") boolean load_debug_files/*=true*/); @Namespace("torch::jit") public static native @ByVal JitModule load( @Cast("std::istream*") @ByRef Pointer in); @@ -78676,13 +78746,13 @@ scalar_t sf(scalar_t x, scalar_t y) * Python or {@code torch::jit::ExportModule} in C++. */ @Namespace("torch::jit") public static native @ByVal JitModule load( @StdString BytePointer filename, - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device, + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device, @Cast("bool") boolean load_debug_files/*=true*/); @Namespace("torch::jit") public static native @ByVal JitModule load( @StdString BytePointer filename); @Namespace("torch::jit") public static native @ByVal JitModule load( @StdString String filename, - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device, + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device, @Cast("bool") boolean load_debug_files/*=true*/); @Namespace("torch::jit") public static native @ByVal JitModule load( @StdString String filename); @@ -78715,7 +78785,7 @@ scalar_t sf(scalar_t x, scalar_t y) * Python or {@code torch::jit::ExportModule} in C++. 
*/ @Namespace("torch::jit") public static native @ByVal JitModule load( @SharedPtr("caffe2::serialize::ReadAdapterInterface") @ByVal ReadAdapterInterface rai, - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device, + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device, @Cast("bool") boolean load_debug_files/*=true*/); @Namespace("torch::jit") public static native @ByVal JitModule load( @SharedPtr("caffe2::serialize::ReadAdapterInterface") @ByVal ReadAdapterInterface rai); @@ -78740,7 +78810,7 @@ scalar_t sf(scalar_t x, scalar_t y) @Cast("char*") @SharedPtr BytePointer data, @Cast("size_t") long size, @ByRef ExtraFilesMap extra_files, - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device); @Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module( @Cast("char*") @SharedPtr BytePointer data, @Cast("size_t") long size, @@ -78749,7 +78819,7 @@ scalar_t sf(scalar_t x, scalar_t y) @Cast("char*") @SharedPtr ByteBuffer data, @Cast("size_t") long size, @ByRef ExtraFilesMap extra_files, - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device); @Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module( @Cast("char*") @SharedPtr ByteBuffer data, @Cast("size_t") long size, @@ -78758,7 +78828,7 @@ scalar_t sf(scalar_t x, scalar_t y) @Cast("char*") @SharedPtr byte[] data, @Cast("size_t") long size, @ByRef ExtraFilesMap extra_files, - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device); @Namespace("torch::jit") public static native @ByVal JitModule parse_and_initialize_jit_module( @Cast("char*") @SharedPtr byte[] data, @Cast("size_t") long size, @@ -78767,14 +78837,14 @@ scalar_t sf(scalar_t x, scalar_t y) @Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_file( @StdString BytePointer filename, @ByRef ExtraFilesMap extra_files, - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device); @Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_file( @StdString BytePointer filename, @ByRef ExtraFilesMap extra_files); @Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_file( @StdString String filename, @ByRef ExtraFilesMap extra_files, - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device); @Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_file( @StdString String filename, @ByRef ExtraFilesMap extra_files); @@ -78782,7 +78852,7 @@ scalar_t sf(scalar_t x, scalar_t y) @Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_stream( @Cast("std::istream*") @ByRef Pointer in, @ByRef ExtraFilesMap extra_files, - @ByVal(nullValue = "std::optional(c10::nullopt)") DeviceOptional device); + @ByVal(nullValue = "std::optional(std::nullopt)") DeviceOptional device); @Namespace("torch::jit") public static native @ByVal JitModule load_jit_module_from_stream( @Cast("std::istream*") @ByRef Pointer in, @ByRef ExtraFilesMap extra_files); @@ -78791,8 +78861,7 @@ scalar_t sf(scalar_t x, scalar_t y) 
@Const @ByRef StrongTypePtr type, @ByVal IValue input); - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from c10/util/FbcodeMaps.h @@ -78976,16 +79045,14 @@ scalar_t sf(scalar_t x, scalar_t y) @Const @ByRef Tensor t, @Const @ByRef GenericDict metadata_idict); - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/jit/frontend/parser_constants.h // #pragma once @Namespace("torch::jit") public static native @Cast("const char*") BytePointer valid_single_char_tokens(); public static native void valid_single_char_tokens(BytePointer setter); - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/jit/frontend/strtod.h @@ -79009,8 +79076,7 @@ scalar_t sf(scalar_t x, scalar_t y) @Namespace("torch::jit") public static native float strtof_c(@Cast("const char*") BytePointer nptr, @Cast("char**") @ByPtrPtr ByteBuffer endptr); @Namespace("torch::jit") public static native float strtof_c(String nptr, @Cast("char**") @ByPtrPtr byte[] endptr); - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/jit/frontend/schema_matching.h @@ -79033,7 +79099,7 @@ scalar_t sf(scalar_t x, scalar_t y) @ByRef Graph graph, @ByVal NamedValueArrayRef args, @ByVal NamedValueArrayRef kwargs, - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") NamedValueOptional self); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") NamedValueOptional self); @Namespace("torch::jit") public static native @ByVal MatchedSchema matchSchema( @Const @ByRef FunctionSchema schema, @Const @ByRef SourceRange loc, @@ -79047,7 +79113,7 @@ scalar_t sf(scalar_t x, scalar_t y) @ByRef Graph graph, @ByVal NamedValueArrayRef args, @ByVal NamedValueArrayRef kwargs, - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") NamedValueOptional self, + @Const @ByRef(nullValue = "std::optional(std::nullopt)") NamedValueOptional self, @Cast("bool") boolean render_errors/*=false*/); @Namespace("torch::jit") public static native @ByVal SizeTMatchedSchemaPair matchSchemas( @Const @ByRef FunctionSchemaVector schemas, @@ -79068,7 +79134,7 @@ scalar_t sf(scalar_t x, scalar_t y) @ByVal Symbol name, @ByVal NamedValueArrayRef args, @ByVal NamedValueArrayRef kwargs, - @Const @ByRef(nullValue = "std::optional(c10::nullopt)") NamedValueOptional self); + @Const @ByRef(nullValue = "std::optional(std::nullopt)") NamedValueOptional self); @Namespace("torch::jit") public static native Value emitBuiltinCall( @Const @ByRef SourceRange loc, @ByRef Graph graph, @@ -79099,8 +79165,7 @@ scalar_t sf(scalar_t x, scalar_t y) @Const @ByRef Type.TypePtr concrete_type, Value value, @Cast("bool") boolean allow_conversions); - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/jit/frontend/versioned_symbols.h @@ -79120,14 +79185,12 @@ scalar_t sf(scalar_t x, scalar_t y) // Maps the given kind to the minimum version that supports it. // See note [Dynamic Versions and torch.jit.save vs. torch.save] @Namespace("torch::jit") public static native @Cast("uint64_t") long get_min_version_for_kind(@Cast("const torch::jit::NodeKind*") @ByRef Symbol kind); - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/jit/frontend/tree_views.h // #pragma once -// #include // #include // #include // #include @@ -79363,8 +79426,7 @@ scalar_t sf(scalar_t x, scalar_t y) // For example, ((x | y) | z) is transformed into Union[x, y, z]. 
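Java callers never spell out these std::optional(std::nullopt) defaults: passing null for a parameter annotated with nullValue substitutes the C++ expression. Two of the smaller frontend entry points above can also be exercised directly; a sketch, where Symbol.fromQualString and the choice of aten::div as a versioned kind are assumptions:

    import org.bytedeco.javacpp.BytePointer;
    import org.bytedeco.pytorch.Symbol;
    import org.bytedeco.pytorch.global.torch;

    public class FrontendHelpers {
        public static void main(String[] args) {
            // Single-character tokens recognized by the TorchScript lexer.
            BytePointer tokens = torch.valid_single_char_tokens();
            System.out.println(tokens.getString());

            // Minimum serialization version that supports a given node kind.
            Symbol kind = Symbol.fromQualString("aten::div");
            System.out.println(torch.get_min_version_for_kind(kind));
        }
    }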
@Namespace("torch::jit") public static native @ByVal Expr pep604union_to_union(@Const @ByRef Expr expr); - // namespace jit - // namespace torch + // namespace torch::jit // namespace std @@ -79443,6 +79505,10 @@ scalar_t sf(scalar_t x, scalar_t y) /** Deserialize a {@code torch::IValue} from bytes produced by either * {@code torch::pickle_save} in C++ or {@code torch.save} in Python */ +/** Deserialize a {@code torch::IValue} from bytes produced by either + * {@code torch::pickle_save} in C++ or {@code torch.save} in Python with custom object. */ + + /** {@code reader} is a function that takes in a size to read from some pickled * binary. {@code reader} should remember where it last read, and return * the number of bytes read. @@ -79556,9 +79622,11 @@ scalar_t sf(scalar_t x, scalar_t y) TypeParser type_parser/*=torch::jit::Unpickler::defaultTypeParser*/); // #ifndef C10_MOBILE +// Targeting ../StringViewReader.java + + // #endif - // namespace jit - // namespace torch + // namespace torch::jit // Parsed from torch/csrc/inductor/aoti_runner/model_container_runner.h @@ -79568,6 +79636,7 @@ scalar_t sf(scalar_t x, scalar_t y) // #include // #include +// #include // Targeting ../DynamicLibrary.java @@ -79576,6 +79645,7 @@ scalar_t sf(scalar_t x, scalar_t y) + // namespace torch::inductor // #endif @@ -79606,8 +79676,8 @@ scalar_t sf(scalar_t x, scalar_t y) // #include // #include -// callback function will be given arguments (optional oldValue, -// optional newValue) +// callback function will be given arguments (std::optional oldValue, +// std::optional newValue) // Targeting ../Store.java @@ -79937,8 +80007,6 @@ scalar_t sf(scalar_t x, scalar_t y) // Since SOCKET_ERROR = -1 in MSVC, so also leverage SYSCHECK_ERR_RETURN_NEG1 // #define SYSCHECK_ERR_RETURN_NEG1(expr) SYSCHECK(expr, __output != -1) - - // Send and receive // send a vector's length and data diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java index 204a1b4fd91..31aae196dd6 100644 --- a/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java +++ b/pytorch/src/gen/java/org/bytedeco/pytorch/global/torch_cuda.java @@ -127,9 +127,10 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // #define STRINGIFY(x) #x // #define STRING(x) STRINGIFY(x) -// #if CUDNN_MAJOR < 6 -// #pragma message ("CuDNN v" STRING(CUDNN_MAJOR) " found, but need at least CuDNN v6. You can get the latest version of CuDNN from https://developer.nvidia.com/cudnn or disable CuDNN with USE_CUDNN=0") -// #pragma message "We strongly encourage you to move to 6.0 and above." +// #if CUDNN_MAJOR < 8 || (CUDNN_MAJOR == 8 && CUDNN_MINOR < 5) +// #pragma message("CuDNN v" STRING( +// CUDNN_MAJOR) " found, but need at least CuDNN v8. You can get the latest version of CuDNN from https://developer.nvidia.com/cudnn or disable CuDNN with USE_CUDNN=0") +// #pragma message "We strongly encourage you to move to 8.5 and above." // #pragma message "This message is intended to annoy you enough to update." 
// #endif @@ -464,6 +465,9 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // #include // #endif +// #if defined(USE_CUDSS) +// #endif + // #if defined(USE_ROCM) // #endif @@ -525,6 +529,9 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // #if defined(CUDART_VERSION) || defined(USE_ROCM) @Namespace("at::cuda") public static native cusolverDnContext getCurrentCUDASolverDnHandle(); +// #endif + +// #if defined(USE_CUDSS) // #endif // namespace at::cuda @@ -662,6 +669,9 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // #include // #endif +// #if defined(USE_CUDSS) +// #endif + // #include // #include // #include @@ -718,6 +728,11 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // " when calling `" #EXPR "`"); // } while (0) +// #if defined(USE_CUDSS) +// #else +// #define TORCH_CUDSS_CHECK(EXPR) EXPR +// #endif + // cusolver related headers are only supported on cuda now // #ifdef CUDART_VERSION @Namespace("at::cuda::solver") public static native @Cast("const char*") BytePointer cusolverGetErrorMessage(@Cast("cusolverStatus_t") int status); @@ -843,8 +858,8 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // #pragma once -// #include // #include +// #include @Namespace("at::native") public static native cudnnContext getCudnnHandle(); // namespace at::native @@ -856,8 +871,8 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // #include // #include -// #include // #include +// #include // cuDNN has a buggy check for tensor being contiguous (that is, it does // not ignore stride for dimension that is equal to 0). This function @@ -865,7 +880,7 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // strides to 1 as cuDNN likes. @Namespace("at::native") public static native @ByVal Tensor contiguousIfZeroInStrides(@Const @ByRef Tensor t); - + // namespace at::native // Parsed from c10/cuda/CUDAGraphsC10Utils.h @@ -898,11 +913,45 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // namespace c10::cuda -// Parsed from c10/cuda/CUDACachingAllocator.h +// Parsed from c10/core/CachingDeviceAllocator.h // #pragma once // #include +// #include + +// #include +// Targeting ../cuda/Stat.java + + + +@Namespace("c10::CachingDeviceAllocator") public enum StatType { + AGGREGATE(0), + SMALL_POOL(1), + LARGE_POOL(2), + NUM_TYPES(3);// remember to update this whenever a new stat type is added + + public final long value; + private StatType(long v) { this.value = v; } + private StatType(StatType e) { this.value = e.value; } + public StatType intern() { for (StatType e : values()) if (e.value == value) return e; return this; } + @Override public String toString() { return intern().name(); } +} +// Targeting ../cuda/DeviceStats.java + + + +// Size pretty-printer +@Namespace("c10::CachingDeviceAllocator") public static native @StdString BytePointer format_size(@Cast("uint64_t") long size); + + // namespace c10::CachingDeviceAllocator + + +// Parsed from c10/cuda/CUDACachingAllocator.h + +// #pragma once + +// #include // #include // #include // #include @@ -941,24 +990,8 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // not counted as a word boundary, so you would otherwise have to list each // of these functions. 
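As the hunks above show, Stat, StatType, DeviceStats and format_size moved from c10::cuda::CUDACachingAllocator into the device-generic c10::CachingDeviceAllocator namespace; the Java-visible names stay in the torch_cuda globals. A sketch, assuming the -gpu artifacts are on the classpath:

    import org.bytedeco.pytorch.global.torch_cuda;

    public class AllocatorStats {
        public static void main(String[] args) {
            // Pretty-print a byte count, e.g. "20.00 MiB".
            System.out.println(torch_cuda.format_size(20L << 20).getString());

            // Stats are still tracked per pool; only the namespace moved.
            System.out.println(torch_cuda.StatType.LARGE_POOL);
        }
    }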
- -// Targeting ../cuda/Stat.java - - - -@Namespace("c10::cuda::CUDACachingAllocator") public enum StatType { - AGGREGATE(0), - SMALL_POOL(1), - LARGE_POOL(2), - NUM_TYPES(3);// remember to update this whenever a new stat type is added - - public final long value; - private StatType(long v) { this.value = v; } - private StatType(StatType e) { this.value = e.value; } - public StatType intern() { for (StatType e : values()) if (e.value == value) return e; return this; } - @Override public String toString() { return intern().name(); } -} -// Targeting ../cuda/DeviceStats.java +// Preserved only for BC reasons +// NOLINTNEXTLINE(misc-unused-using-decls) // Targeting ../cuda/BlockInfo.java @@ -976,6 +1009,9 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // Targeting ../cuda/TraceEntry.java +// Targeting ../cuda/AnnotationEntry.java + + // Targeting ../cuda/AllocatorConfigInfo.java @@ -998,8 +1034,8 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { public RecordContext intern() { for (RecordContext e : values()) if (e.value == value) return e; return this; } @Override public String toString() { return intern().name(); } } +// Targeting ../cuda/ShareableHandle.java -// Size pretty-printer // Targeting ../cuda/CUDAAllocator.java @@ -1055,6 +1091,9 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { +@Namespace("c10::cuda::CUDACachingAllocator") public static native void recordAnnotation( + @StdVector StringPair md); + @@ -1067,6 +1106,8 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // Not part of CUDA_ALLOCATOR_BACKEND_INTERFACE +@Namespace("c10::cuda::CUDACachingAllocator") public static native @ByVal ShareableHandle shareIpcHandle(Pointer ptr); + @@ -1074,6 +1115,14 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // namespace c10::cuda::CUDACachingAllocator +// Targeting ../cuda/MemPool.java + + +// Targeting ../cuda/MemPoolContext.java + + + + // namespace c10::cuda // Parsed from c10/cuda/impl/CUDAGuardImpl.h @@ -1094,9 +1143,9 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // #include // #include // #include -// #include // #include // #include +// #include // namespace c10::cuda::impl @@ -1133,15 +1182,15 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { // #pragma once -// #include // #include +// #include @Namespace("at::native") public static native @Cast("cudnnDataType_t") int getCudnnDataTypeFromScalarType(ScalarType dtype); - // namespace at::cudnn + // namespace at::native // Parsed from ATen/cudnn/Descriptors.h @@ -1221,7 +1270,7 @@ public class torch_cuda extends org.bytedeco.pytorch.presets.torch_cuda { - // namespace + // namespace // Parsed from ATen/cuda/CUDAEvent.h diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/cuda/AllocatorTraceTracker.java b/pytorch/src/main/java/org/bytedeco/pytorch/cuda/AllocatorTraceTracker.java index 6743a6fc8dd..d6957bb4759 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/cuda/AllocatorTraceTracker.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/cuda/AllocatorTraceTracker.java @@ -4,9 +4,8 @@ import org.bytedeco.javacpp.Loader; import org.bytedeco.javacpp.Pointer; import org.bytedeco.javacpp.annotation.ByRef; -import org.bytedeco.javacpp.annotation.Const; +import org.bytedeco.javacpp.annotation.Cast; import org.bytedeco.javacpp.annotation.Properties; -import org.bytedeco.pytorch.cuda.TraceEntry; @Properties(inherit = 
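recordAnnotation and shareIpcHandle are new CUDA allocator hooks in 2.5, recorded through the AnnotationEntry and ShareableHandle types targeted above. A sketch of pushing one metadata pair into the allocator trace; the StringPair array constructor and first/second setters are assumptions about the generated pair class (its define() appears further down in this patch), and the call is a no-op unless trace recording is enabled:

    import org.bytedeco.pytorch.StringPair;
    import org.bytedeco.pytorch.global.torch_cuda;

    public class AnnotateTrace {
        public static void main(String[] args) {
            // @StdVector StringPair: a contiguous array of one pair.
            StringPair md = new StringPair(1);
            md.first("phase").second("forward");
            torch_cuda.recordAnnotation(md);
        }
    }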
org.bytedeco.pytorch.presets.torch_cuda.class) public class AllocatorTraceTracker extends FunctionPointer { @@ -28,5 +27,5 @@ protected AllocatorTraceTracker() { private native void allocate(); // std::function - public native void call(@Const @ByRef TraceEntry e); + public native void call(@ByRef @Cast("const c10::cuda::CUDACachingAllocator::TraceEntry*") Pointer e); } diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/gloo.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/gloo.java index 043a975db04..94210d42be8 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/gloo.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/gloo.java @@ -23,6 +23,7 @@ import org.bytedeco.javacpp.ClassProperties; import org.bytedeco.javacpp.LoadEnabled; +import org.bytedeco.javacpp.annotation.Platform; import org.bytedeco.javacpp.annotation.Properties; import org.bytedeco.javacpp.presets.chrono; import org.bytedeco.javacpp.tools.*; @@ -31,7 +32,12 @@ * @author Hervé Guillemet */ @Properties( - inherit = { torch.class, chrono.class }, + inherit = torch.class, + value = @Platform( + include = { + "torch/csrc/distributed/c10d/ProcessGroupGloo.hpp", + } + ), target = "org.bytedeco.pytorch.gloo", global = "org.bytedeco.pytorch.global.gloo" ) diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java index 1f451327d0c..5c827ddd2e2 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch.java @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020-2023 Hervé Guillemet, Samuel Audet, Eduardo Gonzalez + * Copyright (C) 2020-2024 Hervé Guillemet, Samuel Audet, Eduardo Gonzalez * * Licensed either under the Apache License, Version 2.0, or (at your option) * under the terms of the GNU General Public License as published by @@ -55,7 +55,7 @@ * @author Samuel Audet, Hervé Guillemet */ @Properties( - inherit = { openblas.class, chrono.class }, + inherit = {openblas.class, chrono.class}, value = { @Platform( value = {"linux", "macosx", "windows"}, @@ -280,14 +280,14 @@ public static void sharedMap(InfoMap infoMap) { infoMap .put(new Info().enumerate().friendly()) .put(new Info("auto", "c10::reverse_iterator", "ska::flat_hash_map", /*"std::atomic", */"std::conditional", "std::iterator_traits", - "std::initializer_list", "std::integral_constant", "std::mutex", "std::reverse_iterator" /*, "std::weak_ptr"*/).skip()) + "std::initializer_list", "std::integral_constant", "std::mutex", "std::nullopt_t", "std::reverse_iterator" /*, "std::weak_ptr"*/).skip()) .put(new Info("basic/containers").cppTypes("torch::optional")) ; //// Macros infoMap - .put(new Info("TORCH_API", "C10_API", "TORCH_XPU_API", "C10_EXPORT", "C10_HIDDEN", "C10_IMPORT", "C10_API_ENUM", "EXPORT_IF_NOT_GCC", - "TORCH_CUDA_CU_API", "TORCH_CUDA_CPP_API", "TORCH_HIP_API", "TORCH_PYTHON_API", + .put(new Info("TORCH_API", "C10_API", "TORCH_XPU_API", "C10_EXPORT", "C10_HIDDEN", "C10_IMPORT", "C10_API_ENUM", "C10_UNUSED", + "EXPORT_IF_NOT_GCC", "TORCH_CUDA_CU_API", "TORCH_CUDA_CPP_API", "TORCH_HIP_API", "TORCH_PYTHON_API", "TORCH_UNUSED_EXCEPT_CUDA", "__ubsan_ignore_float_divide_by_zero__", "__ubsan_ignore_undefined__", "__ubsan_ignore_signed_int_overflow__", "__ubsan_ignore_function__", "C10_CLANG_DIAGNOSTIC_IGNORE", "C10_CLANG_DIAGNOSTIC_PUSH", "C10_CLANG_DIAGNOSTIC_POP", "C10_ATTR_VISIBILITY_HIDDEN", "C10_ERASE", "C10_UID", "C10_NODISCARD", "C10_UNUSED", "C10_USED", "C10_RESTRICT", 
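The tracker callback above now takes an opaque Pointer with a @Cast back to the C++ TraceEntry type instead of a typed @Const @ByRef parameter; a Java-side callback can still rehydrate the entry. A sketch, where attachAllocatorTraceTracker is an assumed attach point and is left commented out:

    import org.bytedeco.javacpp.Pointer;
    import org.bytedeco.pytorch.cuda.AllocatorTraceTracker;
    import org.bytedeco.pytorch.cuda.TraceEntry;

    public class TraceTrackerExample {
        public static void main(String[] args) {
            AllocatorTraceTracker tracker = new AllocatorTraceTracker() {
                @Override public void call(Pointer e) {
                    // Reinterpret the opaque pointer as the entry it points to.
                    TraceEntry entry = new TraceEntry(e);
                    // ... inspect entry fields here ...
                }
            };
            // Keep a strong reference to the tracker while native code may
            // invoke it, then attach it, e.g.:
            // torch_cuda.attachAllocatorTraceTracker(tracker);
        }
    }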
"C10_NOINLINE", "C10_ALWAYS_INLINE", "C10_FALLTHROUGH", @@ -304,12 +304,14 @@ public static void sharedMap(InfoMap infoMap) { "__ubsan_ignore_function__").cppTypes().annotations()) .put(new Info("defined(__CUDACC__) || defined(__HIPCC__)", + "defined(__HIPCC__) && defined(USE_ROCM)", "defined(__CUDACC__) && !defined(USE_ROCM)", "defined(SYCL_EXT_ONEAPI_BFLOAT16_MATH_FUNCTIONS)", "defined(_MSC_VER) && _MSC_VER <= 1900", "defined(NDEBUG)", "defined(__ANDROID__)", "defined(__APPLE__)", + "defined(__aarch64__) && !defined(__CUDACC__)", "defined(__aarch64__) && !defined(C10_MOBILE) && !defined(__CUDACC__)", "defined(__HIP_PLATFORM_HCC__)", "defined(_MSC_VER)", "_WIN32", @@ -337,6 +339,7 @@ public void map(InfoMap infoMap) { sharedMap(infoMap); infoMap + .put(new Info("model_container_runner.h").linePatterns("using CreateAOTIModelRunnerFunc.*", "};").skip()) .put(new Info("ordered_dict.h").linePatterns(".*class Item;.*").skip()) .put(new Info("util.h").linePatterns(".*using approx_time_t = decltype.*").skip()) @@ -380,6 +383,7 @@ public void map(InfoMap infoMap) { .put(new Info("std::aligned_storage_t").pointerTypes("Pointer")) .put(new Info("c10::requires_grad", "at::range", "at::bernoulli_out", "at::normal_out", "at::stft").skipDefaults()) .put(new Info("c10::prim::requires_grad").javaNames("requires_grad")) + .put(new Info("c10::BFloat16::allocate", "c10::IValue::allocate").javaNames("_allocate")) .put(new Info("c10::aten::clone").javaNames("_clone")) .put(new Info("at::TensorBase").base("AbstractTensor").pointerTypes("TensorBase")) .put(new Info("torch::autograd::Variable").pointerTypes("Tensor")) @@ -457,6 +461,7 @@ public void map(InfoMap infoMap) { .put(new Info("std::optional").pointerTypes("FunctionSchemaOptional").define()) .put(new Info("std::optional", "std::optional").pointerTypes("SymDimVectorOptional").define()) .put(new Info("std::optional").pointerTypes("SymIntOptional").define()) + .put(new Info("std::vector >").pointerTypes("SymIntOptionalVector").define()) .put(new Info("std::optional").pointerTypes("IValueOptional").define()) .put(new Info("std::optional").pointerTypes("DimVectorOptional").define()) .put(new Info("std::optional").pointerTypes("DimnameOptional").define()) @@ -488,8 +493,8 @@ public void map(InfoMap infoMap) { .put(new Info("std::optional >").pointerTypes("T_StringSizeTSizeT_TOptional").define()) .put(new Info("torch::optional >").pointerTypes("T_TensorTensor_TOptional").define()) .put(new Info("std::optional >", "std::optional >").pointerTypes("T_TypePtrLong_TOptional").cast().define()) - .put(new Info("std::optional").pointerTypes("StringViewOptional").define()) - .put(new Info("std::optional >").pointerTypes("StringViewVectorOptional").define()) + .put(new Info("std::optional", "std::optional").pointerTypes("StringViewOptional").define()) + .put(new Info("std::optional >", "std::optional >").pointerTypes("StringViewVectorOptional").define()) .put(new Info("std::optional >", "std::optional >")/*.cast?*/.pointerTypes("PointerPairOptional").define()) .put(new Info("std::optional > >", "std::optional >").pointerTypes("WeakStorageVectorOptional").define()) .put(new Info("std::optional").pointerTypes("CppSignatureOptional").define()) @@ -658,7 +663,7 @@ public void map(InfoMap infoMap) { .put(new Info("std::vector").cast().pointerTypes("DoubleVector").define()) .put(new Info("std::vector").cast().pointerTypes("SizeTVector").define()) .put(new Info("std::vector").pointerTypes("StringVector").define()) - .put(new 
Info("std::vector").pointerTypes("StringViewVector").define()) + .put(new Info("std::vector", "std::vector").pointerTypes("StringViewVector").define()) .put(new Info("std::vector >").pointerTypes("StringLongVector").define()) .put(new Info("std::vector", "torch::jit::Stack").pointerTypes("IValueVector").define()) .put(new Info("std::vector::const_iterator", "torch::jit::Stack::const_iterator").pointerTypes("IValueVector.Iterator")) @@ -907,6 +912,7 @@ public void map(InfoMap infoMap) { .put(new Info("std::unordered_map").pointerTypes("ExtraFilesMap").define()) .put(new Info("std::unordered_map").pointerTypes("TypeEnv").define()) .put(new Info("std::unordered_map", "std::unordered_map").pointerTypes("StringIValueMap").define()) + .put(new Info("std::unordered_map").pointerTypes("StringScalarTypeMap").define()) .put(new Info("std::unordered_map").pointerTypes("StringValueMap").define()) .put(new Info("std::unordered_map").pointerTypes("ValueValueMap").define()) .put(new Info("std::unordered_map").pointerTypes("ArgumentSpecExecutionPlanMap").define()) @@ -1061,6 +1067,7 @@ public void map(InfoMap infoMap) { .put(new Info("std::pair").pointerTypes("SizeTMatchedSchemaPair").define()) .put(new Info("std::pair").pointerTypes("BytePointerPair").define()) .put(new Info("std::pair").pointerTypes("EnumNameValue").define()) + .put(new Info("std::pair").pointerTypes("StringPair").define()) .put(new Info("std::pair").pointerTypes("IntPair").define()) ; @@ -1153,7 +1160,7 @@ public void map(InfoMap infoMap) { "torch::jit::SourceRangeDeserializer::deserialize", "torch::jit::SourceRangePickler::pickle", "torch::jit::Pickler::pushEmptyDict", "torch::jit::PrintDepsTable::add", "torch::jit::printerHasSpecialCaseFor", "ONNX_NAMESPACE::ModelProto", "torch::jit::export_onnx", "torch::jit::Function::call", "torch::jit::GraphFunction::call", "torch::jit::GraphFunction::function_creator", "torch::jit::getOptionsFromGlobal", - "torch::jit::serialize_model_proto_to_string", "torch::onnx::IR_VERSION", "torch::onnx::PRODUCER_VERSION", + "torch::jit::pickle_load_obj", "torch::jit::serialize_model_proto_to_string", "torch::onnx::IR_VERSION", "torch::onnx::PRODUCER_VERSION", "TORCH_DISALLOW_TEMPORARIES", "TORCH_DISALLOW_TEMPORARIES_IMPL", // Issue #674 "DEFINE_CASTING(TAG, ...)", "TORCH_ILISTREF_FORALL_TAGS", "torch::autograd::GraphTask::ExecInfo::Capture::DO_NOT_USE_DEPRECATED_get_capture_hooks", @@ -1928,6 +1935,7 @@ public void map(InfoMap infoMap) { /* TODO: see how to map these, if needed and meant to be part of API */ infoMap.put(new Info("c10::MaybeOwnedTraitsGenericImpl >::assignBorrow", "c10::MaybeOwnedTraitsGenericImpl >::destroyBorrow", + "c10::InitEventSampledHandlers", "c10::GetEventSampledHandler", "c10::EventSampledHandler", "torch::autograd::profiler::ProfilerResult", "torch::profiler::impl::ProfilerEventStub", "torch::autograd::profiler::enableProfiler", "torch::autograd::profiler::enableProfilerWithEventPostProcess", "torch::profiler::impl::ProfilerStateBase", "torch::profiler::impl::ProfilerStubs", "torch::autograd::profiler::KinetoEvent", @@ -1949,6 +1957,10 @@ public void map(InfoMap infoMap) { "torch::autograd::get_current_graph_task_exec_info", // Would need to map GraphTask, NodeExec...too much burden + "torch::inductor::CreateAOTIModelRunnerFunc", + "torch::inductor::RegisterAOTIModelRunner", + "torch::inductor::getAOTIModelRunnerRegistry", + "torch::Library::def", // Could not figure out how to map shared_ptr of std::function @@ -2253,6 +2265,7 @@ We need either to put an annotation info on each 
member, or javaName("@NoOffset //// c10::string_view infoMap.put(new Info("c10::basic_string_view", "c10::string_view").annotations("@StringView").valueTypes("BytePointer", "String")); + infoMap.put(new Info("std::string_view").valueTypes("@Cast(\"const char*\") BytePointer", "String")); // Registries. // Skipped them for now. Much burden with variadic args and creator function pointers. @@ -2437,6 +2450,7 @@ We need either to put an annotation info on each member, or javaName("@NoOffset "c10::guts::typelist::concat,c10::guts::typelist::typelist<> ><>", // Persing error ? "c10::hash >::tuple_hash<0> >", "c10::hash >::tuple_hash >", + "c10::guard_size_oblivious", "c10::impl::AnnotatedSchema", "c10::impl::ListElementConstReferenceTraits >", "c10::impl::SizesAndStrides::", @@ -2444,6 +2458,8 @@ We need either to put an annotation info on each member, or javaName("@NoOffset "c10::impl::decay_if_not_tensor", "c10::impl::is_mutable_tensor_ref", "c10::in_place_t", + "c10::impl::ptr_to_first_element", + "c10::impl::op_allowlist_check", "c10::ivalue::ComplexHolder", "c10::ivalue::StreamData3Holder", "c10::ivalue::TupleElements::", @@ -2533,7 +2549,7 @@ We need either to put an annotation info on each member, or javaName("@NoOffset "torch::profiler::impl::ProfilerVoidEventStub", "torch::autograd::_jvp_fn_t", "torch::autograd::profiler::post_process_t", - "at::StringView" // Confusion with string_view and @StringView, and doesn't seem to be of any use in API + "torch::jit::StringViewReader::read", "at::StringView" // Confusion with string_view and @StringView, and doesn't seem to be of any use in API ).skip()) ; @@ -2546,6 +2562,8 @@ We need either to put an annotation info on each member, or javaName("@NoOffset "at::TensorBase::TensorBase(c10::intrusive_ptr)", // "should not be used by end users" "at::TensorIteratorBase::apply_perm_and_mul", "at::assert_no_partial_overlap(c10::TensorImpl*, c10::TensorImpl*)", + "at::impl::MetaBase::set_output_strided", + "at::impl::MetaBase::set_output_raw_strided", "at::impl::VariableHooksInterface::_register_hook", "at::native::construct_nested_strides", // Not exported "at::native::construct_offsets", // Not exported @@ -2565,6 +2583,7 @@ We need either to put an annotation info on each member, or javaName("@NoOffset "c10d::checkForNan", // Not exported "c10d::Logger::operator <<(std::ostream&, const c10d::Logger&)", // No definition "c10d::ProcessGroupGloo::createProcessGroupGloo", // No definition + "c10::TensorImpl::set_allow_tensor_metadata_change", "caffe2::serialize::detail::getPadding", "torch::autograd::add_node_to_current_graph_task_exec_info", "torch::autograd::set_device(int)", @@ -2590,6 +2609,7 @@ We need either to put an annotation info on each member, or javaName("@NoOffset "_object", "PyObject", "THPObjectPtr", "pyobj_list", "std::exception_ptr", "std::type_info", "std::pair", "std::stack >", "torch::autograd::utils::DelayWarningHandler", + "std::pair", "std::stack >", "std::is_same,torch::detail::pack >", "at::cuda::NVRTC", "at::RecordFunctionCallback", "at::StepCallbacks", "THCState", "THHState", "torch::jit::InlinedCallStackPtr", "InlinedCallStackPtr", "torch::jit::ScopePtr", "torch::jit::BackendDebugInfoRecorder", "torch::detail::TensorDataContainer", "at::ArrayRef", @@ -2599,6 +2619,7 @@ We need either to put an annotation info on each member, or javaName("@NoOffset "std::optional", "c10::intrusive_ptr", "c10::intrusive_ptr", "c10::ArrayRef >", + "c10::ArrayRef", "torch::jit::DetachedBuffer::UniqueDetachedBuffer", "std::optional", 
"std::optional::ListOfOptionalElements>", "std::optional::ListOfOptionalElements>", "std::optional >", diff --git a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java index 7fea5a0c6ca..655623f7a7c 100644 --- a/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java +++ b/pytorch/src/main/java/org/bytedeco/pytorch/presets/torch_cuda.java @@ -1,5 +1,5 @@ /* - * Copyright (C) 2023 Hervé Guillemet + * Copyright (C) 2023-2024 Hervé Guillemet * * Licensed either under the Apache License, Version 2.0, or (at your option) * under the terms of the GNU General Public License as published by @@ -37,7 +37,7 @@ * @author Hervé Guillemet */ @Properties( - inherit = { torch.class, cudnn.class, cusparse.class, cusolver.class, cupti.class }, + inherit = {torch.class, cudnn.class, cusparse.class, cusolver.class, cupti.class}, value = { @Platform( extension = "-gpu", @@ -240,7 +240,7 @@ public void map(InfoMap infoMap) { infoMap .put(new Info("USE_CUDNN_RNN_V8_API").define()) // Using CuDNN 8.9.7 or more recent - .put(new Info("defined(IS_NCCL_EXP) && defined(NCCL_COMM_DUMP)").define(false)) + .put(new Info("defined(USE_CUDSS)", "defined(IS_NCCL_EXP) && defined(NCCL_COMM_DUMP)").define(false)) ; //// Different C++ API between platforms diff --git a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h index 280f36fc877..6a6f722cc55 100644 --- a/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h +++ b/pytorch/src/main/resources/org/bytedeco/pytorch/presets/torch_cuda_include.h @@ -22,10 +22,11 @@ #include "ATen/cudnn/Handle.h" #include "ATen/cudnn/Utils.h" #include "c10/cuda/CUDAGraphsC10Utils.h" +#include "c10/core/CachingDeviceAllocator.h", #include "c10/cuda/CUDACachingAllocator.h", #include "c10/cuda/impl/CUDAGuardImpl.h" #include "c10/cuda/CUDAGuard.h" #include "ATen/cudnn/Types.h" #include "ATen/cudnn/Descriptors.h" #include "ATen/cuda/CUDAEvent.h" -#include "torch/csrc/inductor/aoti_runner/model_container_runner_cuda.h" \ No newline at end of file +#include "torch/csrc/inductor/aoti_runner/model_container_runner_cuda.h"