diff --git a/.github/workflows/pytorch.yml b/.github/workflows/pytorch.yml
index c0916d62689..7fa7e6dbf13 100644
--- a/.github/workflows/pytorch.yml
+++ b/.github/workflows/pytorch.yml
@@ -33,7 +33,7 @@ jobs:
- uses: bytedeco/javacpp-presets/.github/actions/deploy-ubuntu@actions
timeout-minutes: 350
macosx-arm64:
- runs-on: macos-12
+ runs-on: macos-14
steps:
- uses: bytedeco/javacpp-presets/.github/actions/deploy-macosx@actions
macosx-x86_64:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index bf7e65d7f3d..03da7f5cec2 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,4 +1,5 @@
+ * Enable distributed package using Gloo in presets for PyTorch ([pull #1510](https://github.com/bytedeco/javacpp-presets/pull/1510))
* Add presets for the CUPTI module of CUDA ([pull #1531](https://github.com/bytedeco/javacpp-presets/pull/1531))
* Add new `ClangMemoryMgmtExample` in samples for LLVM ([pull #1522](https://github.com/bytedeco/javacpp-presets/pull/1522))
* Enable `opencv_python3` module for `macosx-arm64` as well ([pull #1517](https://github.com/bytedeco/javacpp-presets/pull/1517))
@@ -8,7 +9,7 @@
* Build FFmpeg with zimg to enable zscale filter ([pull #1481](https://github.com/bytedeco/javacpp-presets/pull/1481))
* Enable PulseAudio support for FFmpeg on Linux ([pull #1472](https://github.com/bytedeco/javacpp-presets/pull/1472))
* Virtualize `btCollisionWorld`, `btOverlapFilterCallback`, `btOverlapCallback` from Bullet Physics SDK ([pull #1475](https://github.com/bytedeco/javacpp-presets/pull/1475))
- * Upgrade presets for OpenCV 4.10.0, FFmpeg 7.0.2, Spinnaker 4.0.0.116 ([pull #1524](https://github.com/bytedeco/javacpp-presets/pull/1524)), DNNL 3.5.3, OpenBLAS 0.3.28, CMINPACK 1.3.9, GSL 2.8, CPython 3.12.5, NumPy 2.0.1, SciPy 1.14.0, LLVM 18.1.8, LibRaw 0.21.2 ([pull #1520](https://github.com/bytedeco/javacpp-presets/pull/1520)), Tesseract 5.4.1, libffi 3.4.6, CUDA 12.6.0, cuDNN 9.3.0, NCCL 2.22.3, nvCOMP 4.0.0, OpenCL 3.0.16, NVIDIA Video Codec SDK 12.2.72, PyTorch 2.3.0 ([pull #1466](https://github.com/bytedeco/javacpp-presets/pull/1466)), SentencePiece 0.2.0, TensorFlow Lite 2.17.0, TensorRT 10.3.0.26, Triton Inference Server 2.48.0, ONNX 1.16.2, ONNX Runtime 1.18.1, TVM 0.17.0, and their dependencies
+ * Upgrade presets for OpenCV 4.10.0, FFmpeg 7.0.2, Spinnaker 4.0.0.116 ([pull #1524](https://github.com/bytedeco/javacpp-presets/pull/1524)), DNNL 3.5.3, OpenBLAS 0.3.28, CMINPACK 1.3.9, GSL 2.8, CPython 3.12.5, NumPy 2.0.1, SciPy 1.14.0, LLVM 18.1.8, LibRaw 0.21.2 ([pull #1520](https://github.com/bytedeco/javacpp-presets/pull/1520)), Tesseract 5.4.1, libffi 3.4.6, CUDA 12.6.0, cuDNN 9.3.0, NCCL 2.22.3, nvCOMP 4.0.0, OpenCL 3.0.16, NVIDIA Video Codec SDK 12.2.72, PyTorch 2.4.0 ([pull #1466](https://github.com/bytedeco/javacpp-presets/pull/1466)), SentencePiece 0.2.0, TensorFlow Lite 2.17.0, TensorRT 10.3.0.26, Triton Inference Server 2.48.0, ONNX 1.16.2, ONNX Runtime 1.18.1, TVM 0.17.0, and their dependencies
### January 29, 2024 version 1.5.10
* Introduce `macosx-arm64` builds for PyTorch ([pull #1463](https://github.com/bytedeco/javacpp-presets/pull/1463))
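The Gloo entry above is the main functional addition of this release: the presets are now built with `USE_DISTRIBUTED=1`, so the c10d collective classes become reachable from Java. As a rough orientation only, a single-machine all-reduce might be wired up as in the sketch below; the class names (`TCPStore`, `TCPStoreOptions`, `ProcessGroupGloo`, `TensorVector`, `Work`) and their constructors are assumed to mirror the C++ c10d API and have not been checked against the generated bindings.

```java
// Unverified sketch: assumes the presets map c10d::TCPStore, c10d::ProcessGroupGloo and
// std::vector<at::Tensor> to TCPStore, ProcessGroupGloo and TensorVector respectively.
import org.bytedeco.pytorch.*;
import static org.bytedeco.pytorch.global.torch.*;

public class GlooAllReduce {
    public static void main(String[] args) {
        int rank = Integer.parseInt(args[0]);       // this process's rank, 0..worldSize-1
        int worldSize = Integer.parseInt(args[1]);  // total number of processes

        // Rendezvous store shared by all ranks (assumed binding of c10d::TCPStore;
        // rank 0 would normally be configured as the hosting server).
        TCPStore store = new TCPStore("127.0.0.1", new TCPStoreOptions());

        // CPU process group using the Gloo backend (assumed binding of c10d::ProcessGroupGloo).
        ProcessGroupGloo pg = new ProcessGroupGloo(store, rank, worldSize);

        // Every rank contributes a tensor; allreduce sums them element-wise across ranks.
        Tensor t = ones(2, 2);
        TensorVector tensors = new TensorVector(t);
        Work work = pg.allreduce(tensors);
        // c10d::Work::wait() cannot keep its name in Java (Object.wait() is final), so the
        // generated method name will differ; waiting on `work` is still required before reading t.

        t.print();
    }
}
```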
diff --git a/README.md b/README.md
index 5e94db787db..5be8c141bdc 100644
--- a/README.md
+++ b/README.md
@@ -223,7 +223,7 @@ Each child module in turn relies by default on the included [`cppbuild.sh` scrip
* NVIDIA Video Codec SDK 12.2.x https://developer.nvidia.com/nvidia-video-codec-sdk
* OpenCL 3.0.x https://github.com/KhronosGroup/OpenCL-ICD-Loader
* MXNet 1.9.x https://github.com/apache/incubator-mxnet
- * PyTorch 2.3.x https://github.com/pytorch/pytorch
+ * PyTorch 2.4.x https://github.com/pytorch/pytorch
* SentencePiece 0.2.0 https://github.com/google/sentencepiece
* TensorFlow 1.15.x https://github.com/tensorflow/tensorflow
* TensorFlow Lite 2.17.x https://github.com/tensorflow/tensorflow
diff --git a/platform/pom.xml b/platform/pom.xml
index 0805036ba00..1f42863c6cc 100644
--- a/platform/pom.xml
+++ b/platform/pom.xml
@@ -292,7 +292,7 @@
<groupId>org.bytedeco</groupId>
<artifactId>pytorch-platform</artifactId>
- <version>2.3.0-${project.version}</version>
+ <version>2.4.0-${project.version}</version>
<groupId>org.bytedeco</groupId>
diff --git a/pytorch/README.md b/pytorch/README.md
index 8b28b87b9a7..e5cccb5525b 100644
--- a/pytorch/README.md
+++ b/pytorch/README.md
@@ -9,7 +9,7 @@ Introduction
------------
This directory contains the JavaCPP Presets module for:
- * PyTorch 2.3.0 https://pytorch.org/
+ * PyTorch 2.4.0 https://pytorch.org/
Please refer to the parent README.md file for more detailed information about the JavaCPP Presets.
@@ -48,14 +48,14 @@ We can use [Maven 3](http://maven.apache.org/) to download and install automatic
<groupId>org.bytedeco</groupId>
<artifactId>pytorch-platform</artifactId>
- <version>2.3.0-1.5.11-SNAPSHOT</version>
+ <version>2.4.0-1.5.11-SNAPSHOT</version>
<groupId>org.bytedeco</groupId>
<artifactId>pytorch-platform-gpu</artifactId>
- <version>2.3.0-1.5.11-SNAPSHOT</version>
+ <version>2.4.0-1.5.11-SNAPSHOT</version>
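Once one of the artifacts above is on the class path, a minimal smoke test of the upgraded 2.4.0 bindings could look like the sketch below (assuming `rand` in `org.bytedeco.pytorch.global.torch` and `Tensor.print()` keep the shape they had in earlier releases of the presets):

```java
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class HelloTorch {
    public static void main(String[] args) {
        // Creating and printing a small random tensor forces the native
        // libtorch 2.4.0 libraries bundled by pytorch-platform to load.
        Tensor x = rand(2, 3);
        x.print();
    }
}
```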
diff --git a/pytorch/cppbuild.sh b/pytorch/cppbuild.sh
index 1d805c3af39..ee70d1d7c66 100755
--- a/pytorch/cppbuild.sh
+++ b/pytorch/cppbuild.sh
@@ -22,6 +22,9 @@ export USE_CUDNN=0
export USE_NUMPY=0
export USE_OPENMP=1
export USE_SYSTEM_NCCL=1
+export USE_DISTRIBUTED=1
+export USE_NCCL=0 # Not supported on Windows
+
if [[ "$EXTENSION" == *gpu ]]; then
export USE_CUDA=1
export USE_CUDNN=1
@@ -35,7 +38,7 @@ if [[ $PLATFORM == windows* ]]; then
export PYTHON_BIN_PATH=$(which python.exe)
fi
-PYTORCH_VERSION=2.3.0
+PYTORCH_VERSION=2.4.0
export PYTORCH_BUILD_VERSION="$PYTORCH_VERSION"
export PYTORCH_BUILD_NUMBER=1
@@ -44,6 +47,23 @@ mkdir -p "$PLATFORM$EXTENSION"
cd "$PLATFORM$EXTENSION"
INSTALL_PATH=`pwd`
+# Distributed needs libuv on Windows (on other platforms, it's included in tensorpipe)
+if [[ $PLATFORM == windows* ]]; then
+ if [[ ! -d libuv ]]; then
+ mkdir libuv
+ cd libuv
+ download https://dist.libuv.org/dist/v1.39.0/libuv-v1.39.0.tar.gz libuv.tgz
+ tar xfz libuv.tgz
+ mkdir build
+ cd build
+ cmake ../libuv-v1.39.0 -DBUILD_TESTING=OFF
+ cmake --build . --config Release
+ cmake --install . --config Release --prefix ../dist
+ cd ../..
+ fi
+ export libuv_ROOT=${INSTALL_PATH}/libuv/dist
+fi
+
if [[ ! -d pytorch ]]; then
git clone https://github.com/pytorch/pytorch
fi
@@ -123,7 +143,7 @@ case $PLATFORM in
macosx-arm64)
export CC="clang"
export CXX="clang++"
- export CMAKE_OSX_ARCHITECTURES=arm64 # enable cross-compilation on a x86_64 host machine
+ # export PATH=$(brew --prefix llvm@18)/bin:$PATH # Use brew LLVM instead of Xcode LLVM 14
export USE_MKLDNN=OFF
export USE_QNNPACK=OFF # not compatible with arm64 as of PyTorch 2.1.2
export CMAKE_OSX_DEPLOYMENT_TARGET=11.00 # minimum needed for arm64 support
@@ -131,6 +151,8 @@ case $PLATFORM in
macosx-x86_64)
export CC="clang"
export CXX="clang++"
+ export USE_MKLDNN=OFF
+ # export PATH=$(brew --prefix llvm@18)/bin:$PATH # Use brew LLVM instead of Xcode LLVM 14
;;
windows-x86_64)
if which ccache.exe; then
@@ -181,22 +203,53 @@ TORCH_API std::ostream& operator<<(std::ostream& stream, const nn::Module& modul
' torch/csrc/api/include/torch/nn/module.h
sedinplace 's/char(\(.*\))/\1/g' torch/csrc/jit/serialization/pickler.h
+# some windows header defines a macro named "interface"
+sedinplace 's/const std::string& interface)/const std::string\& interface_name)/g' torch/csrc/distributed/c10d/ProcessGroupGloo.hpp
+
+# fix missing #include directives (PyTorch 2.4.0)
+sedinplace 's/#include /#include \
+#include \
+#include /' torch/csrc/distributed/c10d/control_plane/Handlers.cpp
+
+# Remove PyTorch's adaptation of FindOpenMP.cmake and use the standard CMake module instead.
+# On Windows without iomp and with recent versions of VS 2019, the PyTorch version (adding
+# -openmp:experimental and libomp) causes the final binary to link against both libomp and vcomp and produce incorrect results.
+# Wait for an eventual upstream fix, or for CMake 3.30, which allows choosing between -openmp and -openmp:experimental,
+# and see if choosing experimental works. See Issue #1503.
+# On Linux, PyTorch's FindOpenMP.cmake picks LLVM libomp over libgomp. See Issue #1504.
+# On macOS, the standard CMake module works too.
+rm cmake/Modules/FindOpenMP.cmake
+sedinplace 's/include(${CMAKE_CURRENT_LIST_DIR}\/Modules\/FindOpenMP.cmake)/find_package(OpenMP)/g' cmake/Dependencies.cmake
+
#USE_FBGEMM=0 USE_KINETO=0 USE_GLOO=0 USE_MKLDNN=0 \
"$PYTHON_BIN_PATH" setup.py build
rm -Rf ../lib
+if [[ ! -e torch/include/gloo ]]; then
+ ln -sf ../../third_party/gloo/gloo torch/include
+fi
ln -sf pytorch/torch/include ../include
ln -sf pytorch/torch/lib ../lib
ln -sf pytorch/torch/bin ../bin
-# fix library with correct rpath on Mac
case $PLATFORM in
macosx-*)
- cp /usr/local/lib/libomp.dylib ../lib/libiomp5.dylib
+ # Disguise libomp as libiomp5 (they share the same code base and export the same symbols).
+ # This helps if the user wants to link with MKL.
+ # On Linux, a user linking with MKL would instead need to set
+ # MKL_THREADING_LAYER=GNU
+ cp "$(brew ls libomp|grep libomp.dylib)" ../lib/libiomp5.dylib
chmod +w ../lib/libiomp5.dylib
install_name_tool -id @rpath/libiomp5.dylib ../lib/libiomp5.dylib
- install_name_tool -change @rpath/libomp.dylib @rpath/libiomp5.dylib ../lib/libtorch_cpu.dylib
+ codesign --force -s - ../lib/libiomp5.dylib
+ old=$(otool -L ../lib/libtorch_cpu.dylib|grep libomp.dylib|awk '{print $1}')
+ echo install_name_tool -change $old @rpath/libiomp5.dylib ../lib/libtorch_cpu.dylib
+ install_name_tool -change $old @rpath/libiomp5.dylib ../lib/libtorch_cpu.dylib
+ codesign --force -s - ../lib/libtorch_cpu.dylib
;;
+ windows-*)
+ cp ../libuv/dist/lib/Release/* ../lib
+ ;;
esac
cd ../..
diff --git a/pytorch/include_list.pl b/pytorch/include_list.pl
index a91ad04e216..5c01133ab15 100644
--- a/pytorch/include_list.pl
+++ b/pytorch/include_list.pl
@@ -18,7 +18,7 @@ ($)
for (my $d = @inc_per_depth - 1; $d >= $min_depth; $d--) {
if ($inc_per_depth[$d]) {
foreach my $i (@{$inc_per_depth[$d]}) {
- print "#include \"$i\"\n";
+ print "#include \"$i\"\n" unless $incs{$i};
$incs{$i} = 1;
}
undef $inc_per_depth[$d];
@@ -27,12 +27,20 @@ ($)
}
sub go {
- my $path = join ' ', @_;
+ my ($roots, $opts) = @_;
+ my $path = join ' ', @$roots, @$opts;
+
+ my $exe = "g++ -I. -I torch/csrc/api/include/ -DUSE_UCC -DUSE_C10D_GLOO -DUSE_C10D_MPI -DUSE_DISTRIBUTED -H $path -E 2>&1 > /dev/null";
+ #my $exe = "g++ -I. -I torch/csrc/api/include/ -DUSE_UCC -DUSE_C10D_GLOO -DUSE_C10D_MPI -DUSE_DISTRIBUTED -D_WIN32 -H $path -E 2>&1 > /dev/null";
+ my @inc = `$exe`;
+ if ($? != 0) {
+ print STDERR "Failed:\n$exe\nError: $?: $!\n";
+ exit $?;
+ }
- my @inc = `g++ -I. -I torch/csrc/api/include/ -H $path -E 2>&1 > /dev/null`;
foreach my $i (@inc) {
chomp $i;
- my ($depth, $f) = $i =~ /^(\.+)\s(.*\.h)$/;
+ my ($depth, $f) = $i =~ /^(\.+)\s(.*\.h(?:pp)?)$/;
next unless $depth;
$depth = length($depth);
$f =~ s#^\./##;
@@ -48,18 +56,33 @@ sub go {
push @$incs, $f;
}
flush(0);
+ foreach my $i (@$roots) {
+ print "#include \"$i\"\n" unless $incs{$i};
+ $incs{$i} = 1;
+ }
}
chdir "cppbuild/linux-x86_64-gpu/pytorch/torch/include";
-go('torch/csrc/api/include/torch/torch.h', 'torch/script.h', 'torch/csrc/inductor/aoti_runner/model_container_runner_cpu.h');
+print <org.bytedeco
<artifactId>pytorch-platform-gpu</artifactId>
- <version>2.3.0-${project.parent.version}</version>
+ <version>2.4.0-${project.parent.version}</version>
<name>JavaCPP Presets Platform GPU for PyTorch</name>
diff --git a/pytorch/platform/pom.xml b/pytorch/platform/pom.xml
index 7ebc0809dc6..a3ab5725b2d 100644
--- a/pytorch/platform/pom.xml
+++ b/pytorch/platform/pom.xml
@@ -12,7 +12,7 @@
<groupId>org.bytedeco</groupId>
<artifactId>pytorch-platform</artifactId>
- <version>2.3.0-${project.parent.version}</version>
+ <version>2.4.0-${project.parent.version}</version>
<name>JavaCPP Presets Platform for PyTorch</name>
@@ -41,6 +41,12 @@
<version>${project.version}</version>
<classifier>${javacpp.platform.linux-x86_64}</classifier>
+ <dependency>
+ <groupId>${project.groupId}</groupId>
+ <artifactId>${javacpp.moduleId}</artifactId>
+ <version>${project.version}</version>
+ <classifier>${javacpp.platform.macosx-arm64}</classifier>
+ </dependency>
<groupId>${project.groupId}</groupId>
<artifactId>${javacpp.moduleId}</artifactId>
@@ -65,7 +71,7 @@
- <Class-Path>${javacpp.moduleId}.jar ${javacpp.moduleId}-linux-x86_64.jar ${javacpp.moduleId}-macosx-x86_64.jar ${javacpp.moduleId}-windows-x86_64.jar</Class-Path>
+ <Class-Path>${javacpp.moduleId}.jar ${javacpp.moduleId}-linux-x86_64.jar ${javacpp.moduleId}-macosx-arm64.jar ${javacpp.moduleId}-macosx-x86_64.jar ${javacpp.moduleId}-windows-x86_64.jar</Class-Path>
@@ -111,6 +117,7 @@
module org.bytedeco.${javacpp.moduleId}.platform {
requires static org.bytedeco.${javacpp.moduleId}.linux.x86_64;
+ requires static org.bytedeco.${javacpp.moduleId}.macosx.arm64;
requires static org.bytedeco.${javacpp.moduleId}.macosx.x86_64;
requires static org.bytedeco.${javacpp.moduleId}.windows.x86_64;
}
diff --git a/pytorch/pom.xml b/pytorch/pom.xml
index 8f722424484..9335ad36cd8 100644
--- a/pytorch/pom.xml
+++ b/pytorch/pom.xml
@@ -11,7 +11,7 @@
<groupId>org.bytedeco</groupId>
<artifactId>pytorch</artifactId>
- <version>2.3.0-${project.parent.version}</version>
+ <version>2.4.0-${project.parent.version}</version>
<name>JavaCPP Presets for PyTorch</name>
@@ -24,6 +24,12 @@
<artifactId>openblas</artifactId>
<version>0.3.28-${project.parent.version}</version>
+ <dependency>
+ <groupId>org.bytedeco</groupId>
+ <artifactId>cuda</artifactId>
+ <version>12.6-9.3-${project.parent.version}</version>
+ <optional>true</optional>
+ </dependency>
@@ -43,6 +49,11 @@
<artifactId>openblas-platform</artifactId>
<version>0.3.28-${project.parent.version}</version>
+ <dependency>
+ <groupId>org.bytedeco</groupId>
+ <artifactId>cuda-platform</artifactId>
+ <version>12.6-9.3-${project.parent.version}</version>
+ </dependency>
<groupId>org.bytedeco</groupId>
<artifactId>numpy-platform</artifactId>
@@ -60,6 +71,7 @@
<classPath>${basedir}/../openblas/target/classes/</classPath>
<classPath>${basedir}/../cpython/target/classes/</classPath>
<classPath>${basedir}/../numpy/target/classes/</classPath>
+ <classPath>${basedir}/../cuda/target/classes/</classPath>
<classPath>${project.build.outputDirectory}</classPath>
diff --git a/pytorch/samples/pom.xml b/pytorch/samples/pom.xml
index e22da3ab5b5..ef136d7088d 100644
--- a/pytorch/samples/pom.xml
+++ b/pytorch/samples/pom.xml
@@ -12,14 +12,14 @@
<groupId>org.bytedeco</groupId>
<artifactId>pytorch-platform</artifactId>
- <version>2.3.0-1.5.11-SNAPSHOT</version>
+ <version>2.4.0-1.5.11-SNAPSHOT</version>
<groupId>org.bytedeco</groupId>
<artifactId>pytorch-platform-gpu</artifactId>
- <version>2.3.0-1.5.11-SNAPSHOT</version>
+ <version>2.4.0-1.5.11-SNAPSHOT</version>
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunner.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunner.java
index 315a3bb11ad..df70a32da98 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunner.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunner.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -35,9 +36,9 @@ public class AOTIModelContainerRunner extends Pointer {
public native @ByVal ExtraFilesMap getConstantNamesToOriginalFQNs();
public native @ByVal StringIntMap getConstantNamesToDtypes();
- public native void update_inactive_constant_buffer(@Cast("const torch::inductor::TensorConstantMap*") @ByRef HashAliasedIValueMap const_map);
+ public native void update_inactive_constant_buffer(@Cast("const torch::inductor::TensorConstantMap*") @ByRef SizeTStringMap const_map);
public native void update_constant_buffer(
- @Cast("const torch::inductor::TensorConstantMap*") @ByRef HashAliasedIValueMap const_map,
+ @Cast("const torch::inductor::TensorConstantMap*") @ByRef SizeTStringMap const_map,
@Cast("bool") boolean use_inactive,
@Cast("bool") boolean validate_full_updates);
public native void run_const_fold(
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunnerCpu.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunnerCpu.java
index 245736a92bb..76e7b1fcc4d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunnerCpu.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AOTIModelContainerRunnerCpu.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ASMoutput.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ASMoutput.java
index 16a89281a22..c5401cd7f4d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ASMoutput.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ASMoutput.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AcceleratorHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AcceleratorHooksInterface.java
index ba443287ab4..9fb904a24ff 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AcceleratorHooksInterface.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AcceleratorHooksInterface.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -34,4 +35,14 @@ public class AcceleratorHooksInterface extends Pointer {
// Whether the device at device_index is fully initialized or not.
public native @Cast("bool") boolean hasPrimaryContext(@Cast("c10::DeviceIndex") byte device_index);
+
+ public native @Cast("c10::DeviceIndex") byte deviceCount();
+
+ public native void setCurrentDevice(@Cast("c10::DeviceIndex") byte device);
+
+ public native @Cast("c10::DeviceIndex") byte getCurrentDevice();
+
+ public native @Cast("c10::DeviceIndex") byte exchangeDevice(@Cast("c10::DeviceIndex") byte device);
+
+ public native @Cast("c10::DeviceIndex") byte maybeExchangeDevice(@Cast("c10::DeviceIndex") byte device);
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTypeSet.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTypeSet.java
index a439f4848b5..a8a5afb0263 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTypeSet.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTypeSet.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Adagrad.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Adagrad.java
index 01a7f182ea6..c20bfebb1ba 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Adagrad.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Adagrad.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradOptions.java
index 04ff7eeb15d..7e8f958d7a6 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
// namespace torch
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradParamState.java
index 359ee78b6c3..17b1c8f7d2b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradParamState.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdagradParamState.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Adam.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Adam.java
index 9b3ddfe5273..04e03f32b03 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Adam.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Adam.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamOptions.java
index a466026a740..0117a4e490c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
// namespace torch
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamParamState.java
index d944ca047fe..985da3871c8 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamParamState.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamParamState.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamW.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamW.java
index 44de72027df..bf27f6b2d6a 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamW.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamW.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWOptions.java
index e80fb9128bd..85331179736 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
// namespace torch
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWParamState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWParamState.java
index 16efed25070..efdbb70c953 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWParamState.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdamWParamState.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImpl.java
index f8c1bb8caf7..445378866ce 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -21,7 +22,7 @@
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~ AdaptiveAvgPool1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/** Applies adaptive avgpool over a 1-D input.
- * See https://pytorch.org/docs/master/nn.html#torch.nn.AdaptiveAvgPool1d to
+ * See https://pytorch.org/docs/main/nn.html#torch.nn.AdaptiveAvgPool1d to
* learn about the exact behavior of this module.
*
* See the documentation for {@code torch::nn::AdaptiveAvgPool1dOptions} class to
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplBase.java
index 40b08a1ce43..127191932c6 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplCloneable.java
index 48dfd2fbd0c..6cd0d41375f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class AdaptiveAvgPool1dImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional<c10::Device>(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional<c10::Device>(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dOptions.java
index d5c58a579ad..bb74f77f391 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool1dOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
// namespace functional
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImpl.java
index d810f6bb144..d6d32e09206 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -21,7 +22,7 @@
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~ AdaptiveAvgPool2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/** Applies adaptive avgpool over a 2-D input.
- * See https://pytorch.org/docs/master/nn.html#torch.nn.AdaptiveAvgPool2d to
+ * See https://pytorch.org/docs/main/nn.html#torch.nn.AdaptiveAvgPool2d to
* learn about the exact behavior of this module.
*
* See the documentation for {@code torch::nn::AdaptiveAvgPool2dOptions} class to
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplBase.java
index 276f3a01156..c9a9c2155e1 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplCloneable.java
index 7ddff12187a..014f7184f1b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class AdaptiveAvgPool2dImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional<c10::Device>(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional<c10::Device>(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dOptions.java
index e51d5d8c1fc..e3d8cefb60d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool2dOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImpl.java
index 0b4b3af9720..bf7938f53d2 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -21,7 +22,7 @@
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~ AdaptiveAvgPool3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/** Applies adaptive avgpool over a 3-D input.
- * See https://pytorch.org/docs/master/nn.html#torch.nn.AdaptiveAvgPool3d to
+ * See https://pytorch.org/docs/main/nn.html#torch.nn.AdaptiveAvgPool3d to
* learn about the exact behavior of this module.
*
* See the documentation for {@code torch::nn::AdaptiveAvgPool3dOptions} class to
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplBase.java
index 5f1a5b4e6e3..057e7e6e1ad 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplCloneable.java
index 0133810ada4..680a8c29567 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class AdaptiveAvgPool3dImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional<c10::Device>(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional<c10::Device>(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dOptions.java
index 71020055604..76b2760febd 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveAvgPool3dOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImpl.java
index eda1d3583fc..9231dc376db 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -25,7 +26,7 @@
* {@code Efficient softmax approximation for GPUs}_ by Edouard Grave, Armand Joulin,
* Moustapha Cissé, David Grangier, and Hervé Jégou.
* See
- * https://pytorch.org/docs/master/nn.html#torch.nn.AdaptiveLogSoftmaxWithLoss
+ * https://pytorch.org/docs/main/nn.html#torch.nn.AdaptiveLogSoftmaxWithLoss
* to learn about the exact behavior of this module.
*
* See the documentation for {@code torch::nn::AdaptiveLogSoftmaxWithLossOptions}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplCloneable.java
index af51f4e06e1..a1dfcbd3a2d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class AdaptiveLogSoftmaxWithLossImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional<c10::Device>(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional<c10::Device>(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossOptions.java
index ca1256eafa8..29188bd2c2c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveLogSoftmaxWithLossOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImpl.java
index 81010dd00cc..ab941e60976 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -21,7 +22,7 @@
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~ AdaptiveMaxPool1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/** Applies adaptive maxpool over a 1-D input.
- * See https://pytorch.org/docs/master/nn.html#torch.nn.AdaptiveMaxPool1d to
+ * See https://pytorch.org/docs/main/nn.html#torch.nn.AdaptiveMaxPool1d to
* learn about the exact behavior of this module.
*
* See the documentation for {@code torch::nn::AdaptiveMaxPool1dOptions} class to
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplBase.java
index 495fa7d06c0..2b04569bc1a 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplCloneable.java
index 418391fb82f..7182a820e06 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class AdaptiveMaxPool1dImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional<c10::Device>(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional<c10::Device>(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dOptions.java
index 0b41b14b6ee..20cbb8d016a 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool1dOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
// namespace functional
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImpl.java
index 1843987daea..dbf4b012431 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -21,7 +22,7 @@
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AdaptiveMaxPool2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/** Applies adaptive maxpool over a 2-D input.
- * See https://pytorch.org/docs/master/nn.html#torch.nn.AdaptiveMaxPool2d to
+ * See https://pytorch.org/docs/main/nn.html#torch.nn.AdaptiveMaxPool2d to
* learn about the exact behavior of this module.
*
* See the documentation for {@code torch::nn::AdaptiveMaxPool2dOptions} class to
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplBase.java
index 273ea117a34..068bd073015 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplCloneable.java
index c524e3d8712..1145fd54eb9 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class AdaptiveMaxPool2dImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional<c10::Device>(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional<c10::Device>(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dOptions.java
index 2b9b8f5b633..97ffbd0f81f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool2dOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImpl.java
index ef3cec1183d..b7848ec8740 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -21,7 +22,7 @@
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AdaptiveMaxPool3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/** Applies adaptive maxpool over a 3-D input.
- * See https://pytorch.org/docs/master/nn.html#torch.nn.AdaptiveMaxPool3d to
+ * See https://pytorch.org/docs/main/nn.html#torch.nn.AdaptiveMaxPool3d to
* learn about the exact behavior of this module.
*
* See the documentation for {@code torch::nn::AdaptiveMaxPool3dOptions} class to
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplBase.java
index 85d7563ae1e..9282af1448f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplCloneable.java
index 0815e37d3c7..c73ce111401 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class AdaptiveMaxPool3dImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional<torch::Device>(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional<torch::Device>(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dOptions.java
index d52a037fca0..14d5e25ba4c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AdaptiveMaxPool3dOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AliasDb.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AliasDb.java
index 8595ece7434..ee525ef48bc 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AliasDb.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AliasDb.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
// namespace utils
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AliasInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AliasInfo.java
index f8c3f056ccc..0646d0a1f3c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AliasInfo.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AliasInfo.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AliasInfoOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AliasInfoOptional.java
index c91646faf20..a9346b6a9e4 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AliasInfoOptional.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AliasInfoOptional.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,10 +13,12 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
-@NoOffset @Name("c10::optional") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@NoOffset @Name("std::optional") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class AliasInfoOptional extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AliasTypeSetOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AliasTypeSetOptional.java
index 0e80109be7f..506d71476c0 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AliasTypeSetOptional.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AliasTypeSetOptional.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,10 +13,12 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
-@NoOffset @Name("c10::optional<c10::AliasTypeSet>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@NoOffset @Name("std::optional<c10::AliasTypeSet>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class AliasTypeSetOptional extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AllToAllOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AllToAllOptions.java
new file mode 100644
index 00000000000..fdba5130684
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AllToAllOptions.java
@@ -0,0 +1,41 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+@Namespace("c10d") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class AllToAllOptions extends Pointer {
+ static { Loader.load(); }
+ /** Default native constructor. */
+ public AllToAllOptions() { super((Pointer)null); allocate(); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}. */
+ public AllToAllOptions(long size) { super((Pointer)null); allocateArray(size); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public AllToAllOptions(Pointer p) { super(p); }
+ private native void allocate();
+ private native void allocateArray(long size);
+ @Override public AllToAllOptions position(long position) {
+ return (AllToAllOptions)super.position(position);
+ }
+ @Override public AllToAllOptions getPointer(long i) {
+ return new AllToAllOptions((Pointer)this).offsetAddress(i);
+ }
+
+ public native @ByRef Milliseconds timeout(); public native AllToAllOptions timeout(Milliseconds setter);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AllgatherOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AllgatherOptions.java
new file mode 100644
index 00000000000..f980224301f
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AllgatherOptions.java
@@ -0,0 +1,42 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+@Namespace("c10d") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class AllgatherOptions extends Pointer {
+ static { Loader.load(); }
+ /** Default native constructor. */
+ public AllgatherOptions() { super((Pointer)null); allocate(); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}. */
+ public AllgatherOptions(long size) { super((Pointer)null); allocateArray(size); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public AllgatherOptions(Pointer p) { super(p); }
+ private native void allocate();
+ private native void allocateArray(long size);
+ @Override public AllgatherOptions position(long position) {
+ return (AllgatherOptions)super.position(position);
+ }
+ @Override public AllgatherOptions getPointer(long i) {
+ return new AllgatherOptions((Pointer)this).offsetAddress(i);
+ }
+
+ public native @ByRef Milliseconds timeout(); public native AllgatherOptions timeout(Milliseconds setter);
+ public native @Cast("bool") boolean asyncOp(); public native AllgatherOptions asyncOp(boolean setter);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Allocator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Allocator.java
index f6dc7870f9d..a9ae2b30865 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Allocator.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Allocator.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DontIncreaseRefcount.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AllreduceCoalescedOptions.java
similarity index 66%
rename from pytorch/src/gen/java/org/bytedeco/pytorch/DontIncreaseRefcount.java
rename to pytorch/src/gen/java/org/bytedeco/pytorch/AllreduceCoalescedOptions.java
index ea5a7e645bd..3d8e2f2dc43 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/DontIncreaseRefcount.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AllreduceCoalescedOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,15 +13,16 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
-// constructor tag used by intrusive_ptr constructors
-@Namespace("c10::raw") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class DontIncreaseRefcount extends Pointer {
+@Namespace("c10d") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class AllreduceCoalescedOptions extends AllreduceOptions {
/** Empty constructor. Calls {@code super((Pointer)null)}. */
- public DontIncreaseRefcount() { super((Pointer)null); }
+ public AllreduceCoalescedOptions() { super((Pointer)null); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public DontIncreaseRefcount(Pointer p) { super(p); }
+ public AllreduceCoalescedOptions(Pointer p) { super(p); }
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AllreduceOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AllreduceOptions.java
new file mode 100644
index 00000000000..f44646cc98f
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AllreduceOptions.java
@@ -0,0 +1,43 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+@Namespace("c10d") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class AllreduceOptions extends Pointer {
+ static { Loader.load(); }
+ /** Default native constructor. */
+ public AllreduceOptions() { super((Pointer)null); allocate(); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}. */
+ public AllreduceOptions(long size) { super((Pointer)null); allocateArray(size); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public AllreduceOptions(Pointer p) { super(p); }
+ private native void allocate();
+ private native void allocateArray(long size);
+ @Override public AllreduceOptions position(long position) {
+ return (AllreduceOptions)super.position(position);
+ }
+ @Override public AllreduceOptions getPointer(long i) {
+ return new AllreduceOptions((Pointer)this).offsetAddress(i);
+ }
+
+ public native @ByRef @NoOffset ReduceOp reduceOp(); public native AllreduceOptions reduceOp(ReduceOp setter);
+ public native @ByRef @NoOffset Milliseconds timeout(); public native AllreduceOptions timeout(Milliseconds setter);
+ public native @ByRef @NoOffset TensorOptional sparseIndices(); public native AllreduceOptions sparseIndices(TensorOptional setter);
+}
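The three new c10d option classes introduced above (AllToAllOptions, AllgatherOptions, AllreduceOptions) follow the usual JavaCPP accessor/setter pattern, with their timeouts expressed through the new org.bytedeco.javacpp.chrono bindings imported at the top of each file. A minimal usage sketch, assuming the chrono Milliseconds class offers a count-based constructor (an assumption, not shown in this diff):

    import org.bytedeco.javacpp.chrono.Milliseconds;
    import org.bytedeco.pytorch.AllgatherOptions;
    import org.bytedeco.pytorch.AllreduceOptions;

    public class CollectiveOptionsSketch {
        public static void main(String[] args) {
            // AllreduceOptions exposes reduceOp(), timeout() and sparseIndices()
            // as paired getters/setters, as declared in AllreduceOptions.java above.
            AllreduceOptions allreduce = new AllreduceOptions();
            allreduce.timeout(new Milliseconds(60_000)); // hypothetical count constructor

            // AllgatherOptions additionally exposes asyncOp(boolean).
            AllgatherOptions allgather = new AllgatherOptions();
            allgather.asyncOp(false);
        }
    }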
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutFuncOptions.java
index 13a6855649f..45001967ae0 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutFuncOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutFuncOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImpl.java
index 8aa6f8c4e9b..9f91172110c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -21,7 +22,7 @@
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AlphaDropout ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/** Applies Alpha Dropout over the input.
- * See https://pytorch.org/docs/master/nn.html#torch.nn.AlphaDropout to learn
+ * See https://pytorch.org/docs/main/nn.html#torch.nn.AlphaDropout to learn
* about the exact behavior of this module.
*
* See the documentation for {@code torch::nn::AlphaDropoutOptions} class to learn
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplBase.java
index 18fab886e05..7ef9562d602 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplCloneable.java
index 859ea089357..db38d7884c4 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AlphaDropoutImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class AlphaDropoutImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional<torch::Device>(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional<torch::Device>(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnomalyMetadata.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnomalyMetadata.java
index 78e2a22497e..c2884e536c8 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnomalyMetadata.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnomalyMetadata.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnomalyMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnomalyMode.java
index c5574cb4df9..858438ff56b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnomalyMode.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnomalyMode.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyClassType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyClassType.java
index 31dc3152958..c8e8cf4c960 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyClassType.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyClassType.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyClassTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyClassTypePtr.java
index 16a3faca35f..42c6165e3f7 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyClassTypePtr.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyClassTypePtr.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyEnumType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyEnumType.java
index d3312cd32db..38cd010e51b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyEnumType.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyEnumType.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyEnumTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyEnumTypePtr.java
index 7ff7901fdf4..f7eee44556e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyEnumTypePtr.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyEnumTypePtr.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyListType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyListType.java
index a039e827418..b99bb58cc8c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyListType.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyListType.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyListTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyListTypePtr.java
index f25a6d33ee6..fc0fb12f3dd 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyListTypePtr.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyListTypePtr.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java
index 3f97cc39eec..3991d3564c1 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModule.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -390,7 +391,7 @@ public class AnyModule extends Pointer {
/** Creates a deep copy of an {@code AnyModule} if it contains a module, else an
* empty {@code AnyModule} if it is empty. */
- public native @ByVal AnyModule clone(@ByVal(nullValue = "c10::optional<torch::Device>(c10::nullopt)") DeviceOptional device);
+ public native @ByVal AnyModule clone(@ByVal(nullValue = "std::optional<torch::Device>(c10::nullopt)") DeviceOptional device);
public native @ByVal AnyModule clone();
/** Assigns a module to the {@code AnyModule} (to circumvent the explicit
@@ -406,9 +407,9 @@ public class AnyModule extends Pointer {
public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Const @ByRef Tensor input4);
public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Const @ByRef Tensor input4, @Const @ByRef Tensor input5, @Const @ByRef Tensor input6);
public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Const @ByRef Tensor input4, @Const @ByRef Tensor input5, @Const @ByRef Tensor input6, @Const @ByRef Tensor input7, @Const @ByRef Tensor input8);
- public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input, @ByRef(nullValue = "c10::optional(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size);
- public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") LongArrayRefOptional output_size);
- public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input, @Const @ByRef Tensor indices, @Const @ByRef(nullValue = "c10::optional >(c10::nullopt)") LongVectorOptional output_size);
+ public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input, @ByRef(nullValue = "std::optional(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size);
+ public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input, @Const @ByRef(nullValue = "std::optional(c10::nullopt)") LongArrayRefOptional output_size);
+ public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input, @Const @ByRef Tensor indices, @Const @ByRef(nullValue = "std::optional >(c10::nullopt)") LongVectorOptional output_size);
public native @ByVal AnyValue any_forward(@Const @ByRef Tensor input, @ByVal(nullValue = "torch::optional >{}") T_TensorTensor_TOptional hx_opt);
public native @ByVal AnyValue any_forward(@Const @ByRef Tensor query, @Const @ByRef Tensor key, @Const @ByRef Tensor value, @Const @ByRef(nullValue = "torch::Tensor{}") Tensor key_padding_mask, @Cast("bool") boolean need_weights/*=true*/, @Const @ByRef(nullValue = "torch::Tensor{}") Tensor attn_mask, @Cast("bool") boolean average_attn_weights/*=true*/);
@@ -421,9 +422,9 @@ public class AnyModule extends Pointer {
public native @ByVal Tensor forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Const @ByRef Tensor input4);
public native @ByVal Tensor forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Const @ByRef Tensor input4, @Const @ByRef Tensor input5, @Const @ByRef Tensor input6);
public native @ByVal Tensor forward(@Const @ByRef Tensor input1, @Const @ByRef Tensor input2, @Const @ByRef Tensor input3, @Const @ByRef Tensor input4, @Const @ByRef Tensor input5, @Const @ByRef Tensor input6, @Const @ByRef Tensor input7, @Const @ByRef Tensor input8);
- public native @ByVal Tensor forward(@Const @ByRef Tensor input, @ByRef(nullValue = "c10::optional(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size);
- public native @ByVal Tensor forward(@Const @ByRef Tensor input, @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") LongArrayRefOptional output_size);
- public native @ByVal Tensor forward(@Const @ByRef Tensor input, @Const @ByRef Tensor indices, @Const @ByRef(nullValue = "c10::optional >(c10::nullopt)") LongVectorOptional output_size);
+ public native @ByVal Tensor forward(@Const @ByRef Tensor input, @ByRef(nullValue = "std::optional(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size);
+ public native @ByVal Tensor forward(@Const @ByRef Tensor input, @Const @ByRef(nullValue = "std::optional(c10::nullopt)") LongArrayRefOptional output_size);
+ public native @ByVal Tensor forward(@Const @ByRef Tensor input, @Const @ByRef Tensor indices, @Const @ByRef(nullValue = "std::optional >(c10::nullopt)") LongVectorOptional output_size);
public native @ByVal @Name("forward>>") T_TensorT_TensorTensor_T_T forwardT_TensorT_TensorTensor_T_T(@Const @ByRef Tensor input);
public native @ByVal @Name("forward>>") T_TensorT_TensorTensor_T_T forwardT_TensorT_TensorTensor_T_T(@Const @ByRef Tensor input, @ByVal(nullValue = "torch::optional >{}") T_TensorTensor_TOptional hx_opt);
public native @ByVal @Name("forward>") T_TensorTensor_T forwardT_TensorTensor_T(@Const @ByRef Tensor input);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModuleVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModuleVector.java
index a386153cdd3..f4106958483 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModuleVector.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyModuleVector.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTupleType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTupleType.java
index 65d89929367..0e6cf7df191 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTupleType.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTupleType.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTupleTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTupleTypePtr.java
index 66ff484d421..c3542add493 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTupleTypePtr.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTupleTypePtr.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyType.java
index 19a4af193fa..56793933e85 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyType.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyType.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTypePtr.java
index da7ffc8590e..ae897e5d258 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTypePtr.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyTypePtr.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyValue.java
index 7af1d36c38f..ddd85274423 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AnyValue.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AnyValue.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Apply.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Apply.java
index d24b10f427d..03fdf06401d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Apply.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Apply.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -24,8 +25,8 @@ public class Apply extends Expr {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public Apply(Pointer p) { super(p); }
- public Apply(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); }
- private native void allocate(@Const @ByRef TreeRef tree);
+ public Apply(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree) { super((Pointer)null); allocate(tree); }
+ private native void allocate(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree);
public native @ByVal Expr callee();
public native @ByVal ExprList inputs();
public native @ByVal AttributeList attributes();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ApproximateClockToUnixTimeConverter.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ApproximateClockToUnixTimeConverter.java
similarity index 86%
rename from pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ApproximateClockToUnixTimeConverter.java
rename to pytorch/src/gen/java/org/bytedeco/pytorch/ApproximateClockToUnixTimeConverter.java
index af3bd4b9195..6162e8b2a4b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/cuda/ApproximateClockToUnixTimeConverter.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ApproximateClockToUnixTimeConverter.java
@@ -1,14 +1,11 @@
// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
-package org.bytedeco.pytorch.cuda;
+package org.bytedeco.pytorch;
-import org.bytedeco.pytorch.*;
-import org.bytedeco.pytorch.cuda.functions.*;
-import org.bytedeco.pytorch.Error;
-import org.bytedeco.pytorch.global.torch.DeviceType;
-import org.bytedeco.pytorch.global.torch.ScalarType;
-import org.bytedeco.pytorch.global.torch.MemoryFormat;
import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;
@@ -16,14 +13,14 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
-import org.bytedeco.pytorch.*;
-import static org.bytedeco.pytorch.global.torch.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
-import static org.bytedeco.pytorch.global.torch_cuda.*;
+import static org.bytedeco.pytorch.global.torch.*;
// Convert `getCount` results to Nanoseconds since unix epoch.
-@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch_cuda.class)
+@Namespace("c10") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class ApproximateClockToUnixTimeConverter extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Argument.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Argument.java
index 063e9d0b520..9a807c22489 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Argument.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Argument.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -36,50 +37,50 @@ public class Argument extends Pointer {
public Argument(
@StdString BytePointer name/*=""*/,
@Const @ByRef(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type,
- @ByVal(nullValue = "c10::optional(c10::nullopt)") IntOptional N,
- @ByVal(nullValue = "c10::optional(c10::nullopt)") IValueOptional default_value,
+ @ByVal(nullValue = "std::optional(c10::nullopt)") IntOptional N,
+ @ByVal(nullValue = "std::optional(c10::nullopt)") IValueOptional default_value,
@Cast("bool") boolean kwarg_only/*=false*/,
- @ByVal(nullValue = "c10::optional(c10::nullopt)") AliasInfoOptional alias_info) { super((Pointer)null); allocate(name, type, N, default_value, kwarg_only, alias_info); }
+ @ByVal(nullValue = "std::optional(c10::nullopt)") AliasInfoOptional alias_info) { super((Pointer)null); allocate(name, type, N, default_value, kwarg_only, alias_info); }
private native void allocate(
@StdString BytePointer name/*=""*/,
@Const @ByRef(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type,
- @ByVal(nullValue = "c10::optional(c10::nullopt)") IntOptional N,
- @ByVal(nullValue = "c10::optional(c10::nullopt)") IValueOptional default_value,
+ @ByVal(nullValue = "std::optional(c10::nullopt)") IntOptional N,
+ @ByVal(nullValue = "std::optional(c10::nullopt)") IValueOptional default_value,
@Cast("bool") boolean kwarg_only/*=false*/,
- @ByVal(nullValue = "c10::optional(c10::nullopt)") AliasInfoOptional alias_info);
+ @ByVal(nullValue = "std::optional(c10::nullopt)") AliasInfoOptional alias_info);
public Argument() { super((Pointer)null); allocate(); }
private native void allocate();
public Argument(
@StdString String name/*=""*/,
@Const @ByRef(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type,
- @ByVal(nullValue = "c10::optional(c10::nullopt)") IntOptional N,
- @ByVal(nullValue = "c10::optional(c10::nullopt)") IValueOptional default_value,
+ @ByVal(nullValue = "std::optional(c10::nullopt)") IntOptional N,
+ @ByVal(nullValue = "std::optional(c10::nullopt)") IValueOptional default_value,
@Cast("bool") boolean kwarg_only/*=false*/,
- @ByVal(nullValue = "c10::optional(c10::nullopt)") AliasInfoOptional alias_info) { super((Pointer)null); allocate(name, type, N, default_value, kwarg_only, alias_info); }
+ @ByVal(nullValue = "std::optional(c10::nullopt)") AliasInfoOptional alias_info) { super((Pointer)null); allocate(name, type, N, default_value, kwarg_only, alias_info); }
private native void allocate(
@StdString String name/*=""*/,
@Const @ByRef(nullValue = "c10::TypePtr(nullptr)") Type.TypePtr type,
- @ByVal(nullValue = "c10::optional(c10::nullopt)") IntOptional N,
- @ByVal(nullValue = "c10::optional(c10::nullopt)") IValueOptional default_value,
+ @ByVal(nullValue = "std::optional(c10::nullopt)") IntOptional N,
+ @ByVal(nullValue = "std::optional(c10::nullopt)") IValueOptional default_value,
@Cast("bool") boolean kwarg_only/*=false*/,
- @ByVal(nullValue = "c10::optional(c10::nullopt)") AliasInfoOptional alias_info);
+ @ByVal(nullValue = "std::optional(c10::nullopt)") AliasInfoOptional alias_info);
public Argument(
@StdString BytePointer name,
@ByVal Type.TypePtr fake_type,
@ByVal Type.TypePtr real_type,
- @ByVal(nullValue = "c10::optional(c10::nullopt)") IntOptional N,
- @ByVal(nullValue = "c10::optional(c10::nullopt)") IValueOptional default_value,
+ @ByVal(nullValue = "std::optional(c10::nullopt)") IntOptional N,
+ @ByVal(nullValue = "std::optional(c10::nullopt)") IValueOptional default_value,
@Cast("bool") boolean kwarg_only/*=false*/,
- @ByVal(nullValue = "c10::optional(c10::nullopt)") AliasInfoOptional alias_info) { super((Pointer)null); allocate(name, fake_type, real_type, N, default_value, kwarg_only, alias_info); }
+ @ByVal(nullValue = "std::optional(c10::nullopt)") AliasInfoOptional alias_info) { super((Pointer)null); allocate(name, fake_type, real_type, N, default_value, kwarg_only, alias_info); }
private native void allocate(
@StdString BytePointer name,
@ByVal Type.TypePtr fake_type,
@ByVal Type.TypePtr real_type,
- @ByVal(nullValue = "c10::optional(c10::nullopt)") IntOptional N,
- @ByVal(nullValue = "c10::optional(c10::nullopt)") IValueOptional default_value,
+ @ByVal(nullValue = "std::optional(c10::nullopt)") IntOptional N,
+ @ByVal(nullValue = "std::optional(c10::nullopt)") IValueOptional default_value,
@Cast("bool") boolean kwarg_only/*=false*/,
- @ByVal(nullValue = "c10::optional(c10::nullopt)") AliasInfoOptional alias_info);
+ @ByVal(nullValue = "std::optional(c10::nullopt)") AliasInfoOptional alias_info);
public Argument(
@StdString BytePointer name,
@ByVal Type.TypePtr fake_type,
@@ -92,18 +93,18 @@ public Argument(
@StdString String name,
@ByVal Type.TypePtr fake_type,
@ByVal Type.TypePtr real_type,
- @ByVal(nullValue = "c10::optional(c10::nullopt)") IntOptional N,
- @ByVal(nullValue = "c10::optional(c10::nullopt)") IValueOptional default_value,
+ @ByVal(nullValue = "std::optional(c10::nullopt)") IntOptional N,
+ @ByVal(nullValue = "std::optional(c10::nullopt)") IValueOptional default_value,
@Cast("bool") boolean kwarg_only/*=false*/,
- @ByVal(nullValue = "c10::optional(c10::nullopt)") AliasInfoOptional alias_info) { super((Pointer)null); allocate(name, fake_type, real_type, N, default_value, kwarg_only, alias_info); }
+ @ByVal(nullValue = "std::optional(c10::nullopt)") AliasInfoOptional alias_info) { super((Pointer)null); allocate(name, fake_type, real_type, N, default_value, kwarg_only, alias_info); }
private native void allocate(
@StdString String name,
@ByVal Type.TypePtr fake_type,
@ByVal Type.TypePtr real_type,
- @ByVal(nullValue = "c10::optional(c10::nullopt)") IntOptional N,
- @ByVal(nullValue = "c10::optional(c10::nullopt)") IValueOptional default_value,
+ @ByVal(nullValue = "std::optional(c10::nullopt)") IntOptional N,
+ @ByVal(nullValue = "std::optional(c10::nullopt)") IValueOptional default_value,
@Cast("bool") boolean kwarg_only/*=false*/,
- @ByVal(nullValue = "c10::optional(c10::nullopt)") AliasInfoOptional alias_info);
+ @ByVal(nullValue = "std::optional(c10::nullopt)") AliasInfoOptional alias_info);
public Argument(
@StdString String name,
@ByVal Type.TypePtr fake_type,
@@ -136,7 +137,7 @@ private native void allocate(
public native @StdString BytePointer formatTypeMismatchMsg(@StdString BytePointer actual_type);
public native @StdString String formatTypeMismatchMsg(@StdString String actual_type);
- public native @ByVal Argument cloneWithType(@ByVal Type.TypePtr new_type);
+ public native @ByVal Argument cloneWithType(@Const @ByRef Type.TypePtr new_type);
// this function checks whether this Argument is backward compatible with
// the old one. we consider the following cases are backward compatible:
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentArrayRef.java
index ace579e04af..ccf782c4694 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentArrayRef.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentArrayRef.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDef.java
index 91235fdfa60..bece5c5c64c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDef.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDef.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDefArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDefArrayRef.java
index 7d08eb14fa8..e4d0c7d4800 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDefArrayRef.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentDefArrayRef.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentInfo.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentInfo.java
index 7596de57fbe..4161ba4506b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentInfo.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentInfo.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpec.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpec.java
index 51f1009575c..c6a2e2c372d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpec.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpec.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpecCreator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpecCreator.java
index 04e02c3e52b..ea468cd8848 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpecCreator.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpecCreator.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpecExecutionPlanMap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpecExecutionPlanMap.java
index c08603e1f7c..0e417c965aa 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpecExecutionPlanMap.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ArgumentSpecExecutionPlanMap.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Assert.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Assert.java
index abd978952b7..0c5a9ee876e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Assert.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Assert.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -24,8 +25,8 @@ public class Assert extends Stmt {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public Assert(Pointer p) { super(p); }
- public Assert(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); }
- private native void allocate(@Const @ByRef TreeRef tree);
+ public Assert(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree) { super((Pointer)null); allocate(tree); }
+ private native void allocate(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree);
public native @ByVal Expr test();
public native @ByVal ExprMaybe msg();
public static native @ByVal Assert create(
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Assign.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Assign.java
index b708332cc3c..77b54b7538c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Assign.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Assign.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -24,8 +25,8 @@ public class Assign extends Stmt {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public Assign(Pointer p) { super(p); }
- public Assign(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); }
- private native void allocate(@Const @ByRef TreeRef tree);
+ public Assign(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree) { super((Pointer)null); allocate(tree); }
+ private native void allocate(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree);
public static native @ByVal Assign create(
@Const @ByRef SourceRange range,
@Const @ByRef ExprList lhs,
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AssignList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AssignList.java
index 68282742cdf..a031d5c9a2c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AssignList.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AssignList.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -25,8 +26,8 @@ public class AssignList extends TreeView {
public AssignList(Pointer p) { super(p); }
- public AssignList(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); }
- private native void allocate(@Const @ByRef TreeRef tree);
+ public AssignList(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree) { super((Pointer)null); allocate(tree); }
+ private native void allocate(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree);
public native @ByVal @Cast("torch::jit::List::iterator*") AssignListIterator begin();
public native @ByVal @Cast("torch::jit::List::iterator*") AssignListIterator end();
public native @Cast("bool") boolean empty();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AssignListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AssignListIterator.java
index ad6633bba4b..2c568a7c7f2 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AssignListIterator.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AssignListIterator.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -24,8 +25,8 @@ public class AssignListIterator extends Pointer {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public AssignListIterator(Pointer p) { super(p); }
- public AssignListIterator(@ByVal @Cast("torch::jit::TreeList::const_iterator*") TreeRef it) { super((Pointer)null); allocate(it); }
- private native void allocate(@ByVal @Cast("torch::jit::TreeList::const_iterator*") TreeRef it);
+ public AssignListIterator(@ByVal @Cast("torch::jit::TreeList::const_iterator*") Tree it) { super((Pointer)null); allocate(it); }
+ private native void allocate(@ByVal @Cast("torch::jit::TreeList::const_iterator*") Tree it);
public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef AssignListIterator rhs);
public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef AssignListIterator rhs);
public native @ByVal @Name("operator *") Assign multiply();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AssignListMaybe.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AssignListMaybe.java
index 1387fb99e9e..7a054439ee6 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AssignListMaybe.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AssignListMaybe.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -24,8 +25,8 @@ public class AssignListMaybe extends TreeView {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public AssignListMaybe(Pointer p) { super(p); }
- public AssignListMaybe(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); }
- private native void allocate(@Const @ByRef TreeRef tree);
+ public AssignListMaybe(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree) { super((Pointer)null); allocate(tree); }
+ private native void allocate(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree);
/* implicit */ public AssignListMaybe(@Const @ByRef AssignList tree) { super((Pointer)null); allocate(tree); }
private native void allocate(@Const @ByRef AssignList tree);
public native @Cast("bool") boolean present();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Attribute.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Attribute.java
index 27ce7cc23e9..492655c2920 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Attribute.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Attribute.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -28,12 +29,12 @@ public class Attribute extends TreeView {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public Attribute(Pointer p) { super(p); }
- public Attribute(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); }
- private native void allocate(@Const @ByRef TreeRef tree);
+ public Attribute(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree) { super((Pointer)null); allocate(tree); }
+ private native void allocate(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree);
public native @ByVal Ident name();
public native @ByVal Expr value();
public static native @ByVal Attribute create(
@Const @ByRef SourceRange range,
@Const @ByRef Ident name,
- @Const @ByRef TreeRef value);
+ @IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree value);
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeList.java
index 7e44a5c75ba..3a9630d7692 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeList.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeList.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -25,8 +26,8 @@ public class AttributeList extends TreeView {
public AttributeList(Pointer p) { super(p); }
- public AttributeList(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); }
- private native void allocate(@Const @ByRef TreeRef tree);
+ public AttributeList(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree) { super((Pointer)null); allocate(tree); }
+ private native void allocate(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree);
public native @ByVal @Cast("torch::jit::List::iterator*") AttributeListIterator begin();
public native @ByVal @Cast("torch::jit::List::iterator*") AttributeListIterator end();
public native @Cast("bool") boolean empty();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeListIterator.java
index 86f3a2a863f..e111e3a1626 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeListIterator.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeListIterator.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -24,8 +25,8 @@ public class AttributeListIterator extends Pointer {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public AttributeListIterator(Pointer p) { super(p); }
- public AttributeListIterator(@ByVal @Cast("torch::jit::TreeList::const_iterator*") TreeRef it) { super((Pointer)null); allocate(it); }
- private native void allocate(@ByVal @Cast("torch::jit::TreeList::const_iterator*") TreeRef it);
+ public AttributeListIterator(@ByVal @Cast("torch::jit::TreeList::const_iterator*") Tree it) { super((Pointer)null); allocate(it); }
+ private native void allocate(@ByVal @Cast("torch::jit::TreeList::const_iterator*") Tree it);
public native @Cast("bool") @Name("operator !=") boolean notEquals(@Const @ByRef AttributeListIterator rhs);
public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef AttributeListIterator rhs);
public native @ByVal @Name("operator *") Attribute multiply();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AttributePolicy.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AttributePolicy.java
index 318e6081a01..a8094cf11ea 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AttributePolicy.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AttributePolicy.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeValue.java
index 353b3fbda30..5593ee1d929 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeValue.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AttributeValue.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AugAssign.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AugAssign.java
index ce606b315ce..3672a778f4c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AugAssign.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AugAssign.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -25,8 +26,8 @@ public class AugAssign extends Stmt {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public AugAssign(Pointer p) { super(p); }
- public AugAssign(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); }
- private native void allocate(@Const @ByRef TreeRef tree);
+ public AugAssign(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree) { super((Pointer)null); allocate(tree); }
+ private native void allocate(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree);
public static native @ByVal AugAssign create(
@Const @ByRef SourceRange range,
@Const @ByRef Expr lhs,
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AugAssignKind.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AugAssignKind.java
index 31fb6cdf990..bccfae87888 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AugAssignKind.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AugAssignKind.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -24,6 +25,6 @@ public class AugAssignKind extends TreeView {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public AugAssignKind(Pointer p) { super(p); }
- public AugAssignKind(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); }
- private native void allocate(@Const @ByRef TreeRef tree);
+ public AugAssignKind(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree) { super((Pointer)null); allocate(tree); }
+ private native void allocate(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree);
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchBelowADInplaceOrView.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchBelowADInplaceOrView.java
index 5aeb3816687..dd1141a377e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchBelowADInplaceOrView.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchBelowADInplaceOrView.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchBelowAutograd.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchBelowAutograd.java
index b7e076813aa..ac8dba407c3 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchBelowAutograd.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchBelowAutograd.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchSkipFunctionalize.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchSkipFunctionalize.java
index 4b1c2d77330..a3180ce4ed2 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchSkipFunctionalize.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoDispatchSkipFunctionalize.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoFwGradMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoFwGradMode.java
index a18572b6266..2271db19fa2 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoFwGradMode.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoFwGradMode.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoGradMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoGradMode.java
index ceb13776019..a12573fb230 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoGradMode.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoGradMode.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoNonVariableTypeMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoNonVariableTypeMode.java
index 114e70da000..f2ab6ffeb5b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutoNonVariableTypeMode.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutoNonVariableTypeMode.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradCompilerCall.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradCompilerCall.java
new file mode 100644
index 00000000000..6a5e475e9f5
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradCompilerCall.java
@@ -0,0 +1,50 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+@Namespace("torch::dynamo::autograd") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class AutogradCompilerCall extends Pointer {
+ static { Loader.load(); }
+ /** Default native constructor. */
+ public AutogradCompilerCall() { super((Pointer)null); allocate(); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}. */
+ public AutogradCompilerCall(long size) { super((Pointer)null); allocateArray(size); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public AutogradCompilerCall(Pointer p) { super(p); }
+ private native void allocate();
+ private native void allocateArray(long size);
+ @Override public AutogradCompilerCall position(long position) {
+ return (AutogradCompilerCall)super.position(position);
+ }
+ @Override public AutogradCompilerCall getPointer(long i) {
+ return new AutogradCompilerCall((Pointer)this).offsetAddress(i);
+ }
+
+ public native void add_size_input(@Const @ByRef SymInt s);
+
+ public native @Cast("size_t") long emplace_hook(@ByRef(true) SafePyObject fn);
+
+ public native @ByRef @NoOffset TensorArgs tensor_args(); public native AutogradCompilerCall tensor_args(TensorArgs setter);
+ public native @StdVector @NoOffset SizeInput all_size_inputs(); public native AutogradCompilerCall all_size_inputs(SizeInput setter);
+ public native @ByRef @Cast("std::vector*") @NoOffset LongVector dyn_size_inputs(); public native AutogradCompilerCall dyn_size_inputs(LongVector setter);
+
+ public native @ByRef @NoOffset NodeCalls node_calls(); public native AutogradCompilerCall node_calls(NodeCalls setter);
+ public native @NoOffset SizeInput.DynType default_dyn_type(); public native AutogradCompilerCall default_dyn_type(SizeInput.DynType setter);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradContext.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradContext.java
index d7cc6f65b8c..18f6e7a5b8d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradContext.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradContext.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaFactory.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaFactory.java
index 3581dfb6461..a5ae51a0a4e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaFactory.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaFactory.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaFactoryRegisterer.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaFactoryRegisterer.java
index 74e14258664..f0979bd6e8f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaFactoryRegisterer.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaFactoryRegisterer.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaInterface.java
index 368bb234eab..e6cc1db93ac 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaInterface.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradMetaInterface.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradState.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradState.java
index de586a29db0..3d1a22d889c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradState.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AutogradState.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImpl.java
index 1937194bbdf..cd9abc598e5 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -21,7 +22,7 @@
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AvgPool1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/** Applies avgpool over a 1-D input.
- * See https://pytorch.org/docs/master/nn.html#torch.nn.AvgPool1d to learn
+ * See https://pytorch.org/docs/main/nn.html#torch.nn.AvgPool1d to learn
* about the exact behavior of this module.
*
* See the documentation for {@code torch::nn::AvgPool1dOptions} class to learn what
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplBase.java
index 38ce73924f6..97dc53dedfc 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplCloneable.java
index 2c37a94eef8..7dae207aa06 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class AvgPool1dImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dOptions.java
index e816c99a187..da00ecd275f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool1dOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImpl.java
index af2351a0914..cc55427c2d9 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -21,7 +22,7 @@
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AvgPool2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/** Applies avgpool over a 2-D input.
- * See https://pytorch.org/docs/master/nn.html#torch.nn.AvgPool2d to learn
+ * See https://pytorch.org/docs/main/nn.html#torch.nn.AvgPool2d to learn
* about the exact behavior of this module.
*
* See the documentation for {@code torch::nn::AvgPool2dOptions} class to learn what
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplBase.java
index b5c44b68be7..0081df083fe 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplCloneable.java
index eeddd68607b..9426de9839a 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class AvgPool2dImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dOptions.java
index 88055b7067e..d94d57461bd 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool2dOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImpl.java
index 3d88a6167c8..ffe9ef787cd 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -21,7 +22,7 @@
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ AvgPool3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/** Applies avgpool over a 3-D input.
- * See https://pytorch.org/docs/master/nn.html#torch.nn.AvgPool3d to learn
+ * See https://pytorch.org/docs/main/nn.html#torch.nn.AvgPool3d to learn
* about the exact behavior of this module.
*
* See the documentation for {@code torch::nn::AvgPool3dOptions} class to learn what
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplBase.java
index e1221202c68..47ebb774792 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplCloneable.java
index b21dd9618ea..49b36b2077f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class AvgPool3dImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dOptions.java
index 42d125c2084..e6755f12e64 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AvgPool3dOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Await.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Await.java
index 9b8f7335da5..40c7173b29b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Await.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Await.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitPtr.java
deleted file mode 100644
index 23152f289e3..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitPtr.java
+++ /dev/null
@@ -1,154 +0,0 @@
-// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
-import org.bytedeco.pytorch.Module;
-import org.bytedeco.javacpp.annotation.Cast;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-
-@Name("c10::intrusive_ptr") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class AwaitPtr extends Pointer {
- static { Loader.load(); }
- /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public AwaitPtr(Pointer p) { super(p); }
- /** Native array allocator. Access with {@link Pointer#position(long)}. */
- public AwaitPtr(long size) { super((Pointer)null); allocateArray(size); }
- private native void allocateArray(long size);
- @Override public AwaitPtr position(long position) {
- return (AwaitPtr)super.position(position);
- }
- @Override public AwaitPtr getPointer(long i) {
- return new AwaitPtr((Pointer)this).offsetAddress(i);
- }
-
-
- public AwaitPtr() { super((Pointer)null); allocate(); }
- @NoException(true) private native void allocate();
-
- public AwaitPtr(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); }
- @NoException(true) private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0);
-
- // This constructor will not increase the ref counter for you.
- // We use the tagged dispatch mechanism to explicitly mark this constructor
- // to not increase the refcount
- public AwaitPtr(Await target, @ByVal DontIncreaseRefcount arg1) { super((Pointer)null); allocate(target, arg1); }
- @NoException(true) private native void allocate(Await target, @ByVal DontIncreaseRefcount arg1);
-
-
-
- public AwaitPtr(@ByRef(true) AwaitPtr rhs) { super((Pointer)null); allocate(rhs); }
- @NoException(true) private native void allocate(@ByRef(true) AwaitPtr rhs);
-
- public native @ByRef @Name("operator =") @NoException(true) AwaitPtr put(@ByRef(true) AwaitPtr rhs);
-
- // Assignment is implemented using copy and swap. That's safe for self
- // assignment.
- // NOLINTNEXTLINE(bugprone-unhandled-self-assignment)
-
- public native @NoException(true) Await get();
-
- public native @ByRef @Name("operator *") @NoException(true) Await multiply();
-
- public native @Name("operator ->") @NoException(true) Await access();
-
- public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean();
-
- public native @NoException(true) void reset();
-
- public native @NoException(true) void swap(@ByRef AwaitPtr rhs);
-
- // We do a lot of null-pointer checks in our code, good to have this be cheap.
- public native @Cast("bool") @NoException(true) boolean defined();
-
- public native @Cast("uint32_t") @NoException(true) int use_count();
-
- public native @Cast("uint32_t") @NoException(true) int weak_use_count();
-
- public native @Cast("bool") @NoException(true) boolean unique();
-
- /**
- * Returns an owning (!) pointer to the underlying object and makes the
- * intrusive_ptr instance invalid. That means the refcount is not decreased.
- * You *must* put the returned pointer back into a intrusive_ptr using
- * intrusive_ptr::reclaim(ptr) to properly destruct it.
- * This is helpful for C APIs.
- */
- public native @NoException(true) Await release();
-
- /**
- * Takes an owning pointer to TTarget* and creates an intrusive_ptr that takes
- * over ownership. That means the refcount is not increased.
- * This is the counter-part to intrusive_ptr::release() and the pointer
- * passed in *must* have been created using intrusive_ptr::release().
- */
- public static native @ByVal AwaitPtr reclaim(Await owning_ptr);
-
- /**
- * Takes an owning pointer to TTarget* and creates an intrusive_ptr
- * representing a new reference, i.e. the raw pointer retains
- * ownership.
- */
- public static native @ByVal AwaitPtr reclaim_copy(Await owning_ptr);
-
- /**
- * Allocate a heap object with args and wrap it inside a intrusive_ptr and
- * incref. This is a helper function to let make_intrusive() access private
- * intrusive_ptr constructors.
- */
-
- /**
- * Turn a new instance of TTarget (e.g., literally allocated
- * using new TTarget(...) into an intrusive_ptr. If possible,
- * use intrusive_ptr::make instead which statically guarantees
- * that the allocation was done properly.
- *
- * At the moment, the only reason this method exists is because
- * pybind11 holder types expect to be able to allocate in
- * this way (because pybind11 handles the new allocation itself).
- */
- public static native @ByVal AwaitPtr unsafe_steal_from_new(Await raw_ptr);
-
- /**
- * Turn an instance of TTarget that should not be reference counted
- * (e.g., allocated into an arena with placement new) into an
- * intrusive_ptr. This is gratuitously unsafe and should only be
- * used if you can guarantee that the pointer will not escape and be
- * refcounted as normal.
- *
- * {@code expected_decrefs} is a debugging parameter: it indicates the
- * number of strong owners the intrusive_ptr_target in question is
- * expected to get. In most use cases, this will likely be 1.
- *
- * The reason this method exists is for manually sharing
- * StorageImpls across Tensors in the static runtime. It needs
- * access to private intrusive_ptr members so that the refcounts can
- * be initialized to custom values.
- */
- public static native @ByVal AwaitPtr unsafe_adapt_non_heap_allocated(
- Await raw_ptr,
- @Cast("uint32_t") int expected_decrefs);
-
- /**
- * Turn a **non-owning raw pointer** to an intrusive_ptr. It is
- * the moral equivalent of enable_shared_from_this on a shared pointer.
- *
- * This method is only valid for objects that are already live. If
- * you are looking for the moral equivalent of unique_ptr(T*)
- * constructor, see steal_from_new.
- *
- * TODO: https://github.com/pytorch/pytorch/issues/56482
- */
- public static native @ByVal AwaitPtr unsafe_reclaim_from_nonowning(Await raw_ptr);
-}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitSingleElementType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitSingleElementType.java
index f39c07a2a75..f93b2487d9a 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitSingleElementType.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitSingleElementType.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitType.java
index 702535a3092..a1cd22d31ec 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitType.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/AwaitType.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImpl.java
index 302df9666a5..a18cb0fb6bb 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -22,7 +23,7 @@
/** Creates a criterion that measures the Binary Cross Entropy
* between the target and the output.
- * See https://pytorch.org/docs/master/nn.html#torch.nn.BCELoss to learn
+ * See https://pytorch.org/docs/main/nn.html#torch.nn.BCELoss to learn
* about the exact behavior of this module.
*
* See the documentation for {@code torch::nn::BCELossOptions} class to learn what
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplCloneable.java
index 6ec0eb00023..6c4b5a0eb53 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class BCELossImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossOptions.java
index 5a32a0cb0a3..a8f8f0659a4 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCELossOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
// namespace functional
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImpl.java
index b4feec62624..a990d43355c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -25,7 +26,7 @@
* class. This version is more numerically stable than using a plain {@code Sigmoid}
* followed by a {@code BCELoss} as, by combining the operations into one layer,
* we take advantage of the log-sum-exp trick for numerical stability.
- * See https://pytorch.org/docs/master/nn.html#torch.nn.BCEWithLogitsLoss to
+ * See https://pytorch.org/docs/main/nn.html#torch.nn.BCEWithLogitsLoss to
* learn about the exact behavior of this module.
*
* See the documentation for {@code torch::nn::BCEWithLogitsLossOptions} class to
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplCloneable.java
index b4cea865ca5..b1b8dcbec0d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class BCEWithLogitsLossImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossOptions.java
index 27b96ac34f0..000d31327d5 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BCEWithLogitsLossOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
// namespace functional
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16.java
index f7e51e10ccc..4a93c34caac 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
// namespace detail
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16ArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16ArrayRef.java
index a97a23d3891..301469b4a0f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16ArrayRef.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BFloat16ArrayRef.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMeta.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMeta.java
index f7a8ea37cf4..84709cae4ac 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMeta.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMeta.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -31,19 +32,10 @@ public class BackendMeta extends Pointer {
static { Loader.load(); }
/** Default native constructor. */
public BackendMeta() { super((Pointer)null); allocate(); }
- /** Native array allocator. Access with {@link Pointer#position(long)}. */
- public BackendMeta(long size) { super((Pointer)null); allocateArray(size); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public BackendMeta(Pointer p) { super(p); }
- private native void allocate();
- private native void allocateArray(long size);
- @Override public BackendMeta position(long position) {
- return (BackendMeta)super.position(position);
- }
- @Override public BackendMeta getPointer(long i) {
- return new BackendMeta((Pointer)this).offsetAddress(i);
- }
-
- public native @ByVal BackendMetaRef clone(
- @Const @ByRef BackendMetaRef ptr);
+ @IntrusivePtr @Name("c10::make_intrusive") private native void allocate();
+
+ public native @IntrusivePtr("c10::BackendMeta") @Cast({"", "c10::intrusive_ptr&"}) BackendMeta clone(
+ @IntrusivePtr("c10::BackendMeta") @Cast({"", "c10::intrusive_ptr&"}) BackendMeta ptr);
}
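The hunk above swaps the raw allocator for one backed by c10::make_intrusive and maps clone() straight to the @IntrusivePtr-annotated BackendMeta type, which is why the separate BackendMetaRef holder deleted below is no longer generated. A minimal usage sketch, assuming the org.bytedeco.pytorch artifacts and native libraries are on the classpath; it uses only the signatures visible in this hunk plus Pointer.isNull() from JavaCPP:

    import org.bytedeco.pytorch.BackendMeta;

    public class BackendMetaSketch {
        public static void main(String[] args) {
            // The no-arg constructor now allocates through c10::make_intrusive,
            // so the Java object itself carries the reference count.
            BackendMeta meta = new BackendMeta();
            // clone() takes and returns the intrusive_ptr-mapped type directly.
            BackendMeta copy = meta.clone(meta);
            System.out.println(copy.isNull() ? "empty" : "cloned");
        }
    }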
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMetaRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMetaRef.java
deleted file mode 100644
index 25a75ec8ca8..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BackendMetaRef.java
+++ /dev/null
@@ -1,154 +0,0 @@
-// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
-import org.bytedeco.pytorch.Module;
-import org.bytedeco.javacpp.annotation.Cast;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-
-@Name("c10::intrusive_ptr") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class BackendMetaRef extends Pointer {
- static { Loader.load(); }
- /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public BackendMetaRef(Pointer p) { super(p); }
- /** Native array allocator. Access with {@link Pointer#position(long)}. */
- public BackendMetaRef(long size) { super((Pointer)null); allocateArray(size); }
- private native void allocateArray(long size);
- @Override public BackendMetaRef position(long position) {
- return (BackendMetaRef)super.position(position);
- }
- @Override public BackendMetaRef getPointer(long i) {
- return new BackendMetaRef((Pointer)this).offsetAddress(i);
- }
-
-
- public BackendMetaRef() { super((Pointer)null); allocate(); }
- @NoException(true) private native void allocate();
-
- public BackendMetaRef(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); }
- @NoException(true) private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0);
-
- // This constructor will not increase the ref counter for you.
- // We use the tagged dispatch mechanism to explicitly mark this constructor
- // to not increase the refcount
- public BackendMetaRef(BackendMeta target, @ByVal DontIncreaseRefcount arg1) { super((Pointer)null); allocate(target, arg1); }
- @NoException(true) private native void allocate(BackendMeta target, @ByVal DontIncreaseRefcount arg1);
-
-
-
- public BackendMetaRef(@ByRef(true) BackendMetaRef rhs) { super((Pointer)null); allocate(rhs); }
- @NoException(true) private native void allocate(@ByRef(true) BackendMetaRef rhs);
-
- public native @ByRef @Name("operator =") @NoException(true) BackendMetaRef put(@ByRef(true) BackendMetaRef rhs);
-
- // Assignment is implemented using copy and swap. That's safe for self
- // assignment.
- // NOLINTNEXTLINE(bugprone-unhandled-self-assignment)
-
- public native @NoException(true) BackendMeta get();
-
- public native @ByRef @Name("operator *") @NoException(true) BackendMeta multiply();
-
- public native @Name("operator ->") @NoException(true) BackendMeta access();
-
- public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean();
-
- public native @NoException(true) void reset();
-
- public native @NoException(true) void swap(@ByRef BackendMetaRef rhs);
-
- // We do a lot of null-pointer checks in our code, good to have this be cheap.
- public native @Cast("bool") @NoException(true) boolean defined();
-
- public native @Cast("uint32_t") @NoException(true) int use_count();
-
- public native @Cast("uint32_t") @NoException(true) int weak_use_count();
-
- public native @Cast("bool") @NoException(true) boolean unique();
-
- /**
- * Returns an owning (!) pointer to the underlying object and makes the
- * intrusive_ptr instance invalid. That means the refcount is not decreased.
- * You *must* put the returned pointer back into a intrusive_ptr using
- * intrusive_ptr::reclaim(ptr) to properly destruct it.
- * This is helpful for C APIs.
- */
- public native @NoException(true) BackendMeta release();
-
- /**
- * Takes an owning pointer to TTarget* and creates an intrusive_ptr that takes
- * over ownership. That means the refcount is not increased.
- * This is the counter-part to intrusive_ptr::release() and the pointer
- * passed in *must* have been created using intrusive_ptr::release().
- */
- public static native @ByVal BackendMetaRef reclaim(BackendMeta owning_ptr);
-
- /**
- * Takes an owning pointer to TTarget* and creates an intrusive_ptr
- * representing a new reference, i.e. the raw pointer retains
- * ownership.
- */
- public static native @ByVal BackendMetaRef reclaim_copy(BackendMeta owning_ptr);
-
- /**
- * Allocate a heap object with args and wrap it inside a intrusive_ptr and
- * incref. This is a helper function to let make_intrusive() access private
- * intrusive_ptr constructors.
- */
-
- /**
- * Turn a new instance of TTarget (e.g., literally allocated
- * using new TTarget(...) into an intrusive_ptr. If possible,
- * use intrusive_ptr::make instead which statically guarantees
- * that the allocation was done properly.
- *
- * At the moment, the only reason this method exists is because
- * pybind11 holder types expect to be able to allocate in
- * this way (because pybind11 handles the new allocation itself).
- */
- public static native @ByVal BackendMetaRef unsafe_steal_from_new(BackendMeta raw_ptr);
-
- /**
- * Turn an instance of TTarget that should not be reference counted
- * (e.g., allocated into an arena with placement new) into an
- * intrusive_ptr. This is gratuitously unsafe and should only be
- * used if you can guarantee that the pointer will not escape and be
- * refcounted as normal.
- *
- * {@code expected_decrefs} is a debugging parameter: it indicates the
- * number of strong owners the intrusive_ptr_target in question is
- * expected to get. In most use cases, this will likely be 1.
- *
- * The reason this method exists is for manually sharing
- * StorageImpls across Tensors in the static runtime. It needs
- * access to private intrusive_ptr members so that the refcounts can
- * be initialized to custom values.
- */
- public static native @ByVal BackendMetaRef unsafe_adapt_non_heap_allocated(
- BackendMeta raw_ptr,
- @Cast("uint32_t") int expected_decrefs);
-
- /**
- * Turn a **non-owning raw pointer** to an intrusive_ptr. It is
- * the moral equivalent of enable_shared_from_this on a shared pointer.
- *
- * This method is only valid for objects that are already live. If
- * you are looking for the moral equivalent of unique_ptr(T*)
- * constructor, see steal_from_new.
- *
- * TODO: https://github.com/pytorch/pytorch/issues/56482
- */
- public static native @ByVal BackendMetaRef unsafe_reclaim_from_nonowning(BackendMeta raw_ptr);
-}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/DistBackendError.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Backtrace.java
similarity index 63%
rename from pytorch/src/gen/java/org/bytedeco/pytorch/DistBackendError.java
rename to pytorch/src/gen/java/org/bytedeco/pytorch/Backtrace.java
index 99321bfa0d8..aa665fde989 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/DistBackendError.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Backtrace.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,16 +13,21 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
-// Used for collective communication library errors from the distributed module.
-// These turn into DistBackendError when they cross into Python.
-@Namespace("c10") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class DistBackendError extends DistError {
+/**
+ * Interface for a value that is computed on first access.
+ */
+@Name("c10::LazyValue") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class Backtrace extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public DistBackendError(Pointer p) { super(p); }
+ public Backtrace(Pointer p) { super(p); }
+
+ public native @StdString BytePointer get();
}
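The renamed Backtrace class wraps c10::LazyValue<std::string>, i.e. a string that is only materialized the first time get() is called. A plain-Java sketch of the same compute-on-first-access pattern, purely illustrative and not the binding itself:

    import java.util.function.Supplier;

    public class LazyValueSketch<T> {
        private final Supplier<T> compute;
        private T value;        // cached after the first access
        private boolean done;

        public LazyValueSketch(Supplier<T> compute) { this.compute = compute; }

        public synchronized T get() {
            if (!done) { value = compute.get(); done = true; }
            return value;
        }

        public static void main(String[] args) {
            LazyValueSketch<String> backtrace =
                new LazyValueSketch<>(() -> "expensive symbolized stack trace");
            System.out.println(backtrace.get()); // computed here, reused afterwards
        }
    }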
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BarrierOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BarrierOptions.java
new file mode 100644
index 00000000000..fcff36ddb20
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BarrierOptions.java
@@ -0,0 +1,43 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+@Namespace("c10d") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class BarrierOptions extends Pointer {
+ static { Loader.load(); }
+ /** Default native constructor. */
+ public BarrierOptions() { super((Pointer)null); allocate(); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}. */
+ public BarrierOptions(long size) { super((Pointer)null); allocateArray(size); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public BarrierOptions(Pointer p) { super(p); }
+ private native void allocate();
+ private native void allocateArray(long size);
+ @Override public BarrierOptions position(long position) {
+ return (BarrierOptions)super.position(position);
+ }
+ @Override public BarrierOptions getPointer(long i) {
+ return new BarrierOptions((Pointer)this).offsetAddress(i);
+ }
+
+ public native @ByRef @Cast("std::vector*") LongVector device_ids(); public native BarrierOptions device_ids(LongVector setter);
+ public native @ByRef Milliseconds timeout(); public native BarrierOptions timeout(Milliseconds setter);
+ public native @ByRef DeviceOptional device(); public native BarrierOptions device(DeviceOptional setter);
+}
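BarrierOptions is one of the new c10d option structs that arrive with the Gloo-backed distributed package, carrying the device ids, timeout, and optional device for a barrier call. A hedged sketch of filling it in from Java; the only call not visible in the file above is the Milliseconds(long) constructor from the new chrono presets, which is an assumption here:

    import org.bytedeco.pytorch.BarrierOptions;
    import org.bytedeco.javacpp.chrono.Milliseconds;

    public class BarrierOptionsSketch {
        public static void main(String[] args) {
            BarrierOptions opts = new BarrierOptions();
            // Assumed: Milliseconds exposes a long-valued constructor in the chrono presets.
            opts.timeout(new Milliseconds(30_000));
            System.out.println("barrier timeout configured");
        }
    }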
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImpl.java
index ef67cd0b4aa..16d4e54b089 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -22,7 +23,7 @@
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/** Applies the BatchNorm1d function.
- * See https://pytorch.org/docs/master/nn.html#torch.nn.BatchNorm1d to learn
+ * See https://pytorch.org/docs/main/nn.html#torch.nn.BatchNorm1d to learn
* about the exact behavior of this module.
*
* See the documentation for {@code torch::nn::BatchNorm1dOptions} class to learn
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBase.java
index bdc37ef9e6e..d75c5d00853 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBaseBase.java
index b4ae722bcce..0fa03b8821d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBaseBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplBaseBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplCloneable.java
index 3abc8ed6f4f..a487c9c2454 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm1dImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class BatchNorm1dImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImpl.java
index dca3bbd4e46..f14ed975382 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -22,7 +23,7 @@
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/** Applies the BatchNorm2d function.
- * See https://pytorch.org/docs/master/nn.html#torch.nn.BatchNorm2d to learn
+ * See https://pytorch.org/docs/main/nn.html#torch.nn.BatchNorm2d to learn
* about the exact behavior of this module.
*
* See the documentation for {@code torch::nn::BatchNorm2dOptions} class to learn
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBase.java
index 7cee0bbb592..cf2d9738174 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBaseBase.java
index 098f0c726bc..aedc46fefd1 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBaseBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplBaseBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplCloneable.java
index 5b4486f119c..e07f2e7eb04 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm2dImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class BatchNorm2dImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImpl.java
index 4c53a2502d6..57f2905105e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -22,7 +23,7 @@
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/** Applies the BatchNorm3d function.
- * See https://pytorch.org/docs/master/nn.html#torch.nn.BatchNorm3d to learn
+ * See https://pytorch.org/docs/main/nn.html#torch.nn.BatchNorm3d to learn
* about the exact behavior of this module.
*
* See the documentation for {@code torch::nn::BatchNorm3dOptions} class to learn
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBase.java
index aa430c11fbd..f4e87004a98 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBaseBase.java
index 51b0783d20e..994c2b224bd 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBaseBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplBaseBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplCloneable.java
index 1ffac3653fa..0e0c66b1df1 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNorm3dImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class BatchNorm3dImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNormFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNormFuncOptions.java
index 82a33aee581..2166af8925b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNormFuncOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNormFuncOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNormOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNormOptions.java
index d2ebc2c0b58..e1012b0546b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNormOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchNormOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSize.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSize.java
index b0b0957520b..03b7ba6eb65 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSize.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSize.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSizeOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSizeOptional.java
index 526bfd696df..2bae44b8924 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSizeOptional.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSizeOptional.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,10 +13,12 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
-@NoOffset @Name("c10::optional") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@NoOffset @Name("std::optional") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class BatchSizeOptional extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSizeSampler.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSizeSampler.java
index a87bb33cff1..0a4b821dfcf 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSizeSampler.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BatchSizeSampler.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImpl.java
index 2ff1d54f60c..2922fa50ae2 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -21,7 +22,7 @@
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Bilinear ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/** Applies a billinear transformation with optional bias.
- * See https://pytorch.org/docs/master/generated/torch.nn.Bilinear.html to
+ * See https://pytorch.org/docs/main/generated/torch.nn.Bilinear.html to
* learn about the exact behavior of this module.
*
* See the documentation for {@code torch::nn::BilinearOptions} class to learn what
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplCloneable.java
index 20f2c3784bf..7bda99227c6 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class BilinearImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearOptions.java
index d438dfc0b6d..2004b2917aa 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BilinearOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BinOp.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BinOp.java
index 9dd22d383c1..87faabf3beb 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BinOp.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BinOp.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -28,8 +29,8 @@ public class BinOp extends Expr {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public BinOp(Pointer p) { super(p); }
- public BinOp(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); }
- private native void allocate(@Const @ByRef TreeRef tree);
+ public BinOp(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree) { super((Pointer)null); allocate(tree); }
+ private native void allocate(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree);
public native @ByVal Expr lhs();
public native @ByVal Expr rhs();
public static native @ByVal BinOp create(
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Blob.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Blob.java
index 7e38b95d478..2c8827ff6b4 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Blob.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Blob.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -111,5 +112,5 @@ public class Blob extends Pointer {
/**
* \brief Swaps the underlying storage of two blobs.
*/
- public native void swap(@ByRef Blob rhs);
+ public native @NoException(true) void swap(@ByRef Blob rhs);
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Block.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Block.java
index 6528e789332..426e6e02565 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Block.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Block.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BlockArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BlockArrayRef.java
index 08f07737a0f..b262fac0d36 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BlockArrayRef.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BlockArrayRef.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BlockWrap.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BlockWrap.java
index 42eabf3ad02..17e7d549b46 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BlockWrap.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BlockWrap.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolArrayRef.java
index 9830abb5b28..7c77931ea39 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolArrayRef.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolArrayRef.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolOptional.java
index 59675b1de16..3c8f93b5e49 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolOptional.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolOptional.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,10 +13,12 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
-@NoOffset @Name("c10::optional") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@NoOffset @Name("std::optional") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class BoolOptional extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
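The @Name change from c10::optional to std::optional seen here recurs across all of the *Optional helper classes in this patch (BatchSizeOptional, BoolVectorOptional, DeviceOptional, and so on): PyTorch 2.4 routes c10::optional through std::optional, so only the native name in the annotation moves. The Java surface stays the same, as in this hedged sketch; the no-arg constructor and has_value()/get() accessors follow the usual generated-optional pattern and are assumptions, not shown in this hunk:

    import org.bytedeco.pytorch.BoolOptional;

    public class OptionalSketch {
        public static void main(String[] args) {
            BoolOptional maybe = new BoolOptional();   // assumed: empty std::optional<bool>
            // Assumed accessors, following the usual generated-optional surface:
            System.out.println(maybe.has_value() ? String.valueOf(maybe.get()) : "nullopt");
        }
    }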
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolType.java
index 1f3e6816cbd..849eb63ce57 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolType.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolType.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolTypePtr.java
index 6cdbdc83f8f..a0a9b3a840b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolTypePtr.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolTypePtr.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolVector.java
index 1b9db184bef..d7c4d9d711d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolVector.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolVector.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolVectorOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolVectorOptional.java
index 5c0ae0ddcc1..1c77676bc75 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BoolVectorOptional.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BoolVectorOptional.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,10 +13,12 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
-@NoOffset @Name("c10::optional >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@NoOffset @Name("std::optional >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class BoolVectorOptional extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanElementReference.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanElementReference.java
index 024c78a2095..af89c7f720f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanElementReference.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanElementReference.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -24,7 +25,7 @@ public class BooleanElementReference extends Pointer {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public BooleanElementReference(Pointer p) { super(p); }
- public native @Name("operator std::conditional_t::type>::value,const bool&,bool>") boolean getBoolean();
+ public native @Name("operator std::conditional_t::type>,const bool&,bool>") boolean getBoolean();
@@ -35,7 +36,7 @@ public class BooleanElementReference extends Pointer {
public native @Const @ByRef IValue get();
- private static native @Namespace void swap(@ByRef(true) BooleanElementReference lhs, @ByRef(true) BooleanElementReference rhs);
+ private static native @Namespace @NoException(true) void swap(@ByRef(true) BooleanElementReference lhs, @ByRef(true) BooleanElementReference rhs);
public void swap(BooleanElementReference rhs) { swap(this, rhs); }
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanList.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanList.java
index 64488385c2c..6b45f706ea3 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanList.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanList.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,10 +13,12 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
-@Name("c10::List") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@Name("c10::List") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class BooleanList extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanListIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanListIterator.java
index 8aac364c51c..cbba0d4f792 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanListIterator.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BooleanListIterator.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Break.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Break.java
index 311a86ff665..809c06179d5 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Break.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Break.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -24,7 +25,7 @@ public class Break extends Stmt {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public Break(Pointer p) { super(p); }
- public Break(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); }
- private native void allocate(@Const @ByRef TreeRef tree);
+ public Break(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree) { super((Pointer)null); allocate(tree); }
+ private native void allocate(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree);
public static native @ByVal Break create(@Const @ByRef SourceRange range);
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BroadcastOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BroadcastOptions.java
new file mode 100644
index 00000000000..1d27413a439
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BroadcastOptions.java
@@ -0,0 +1,44 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+@Namespace("c10d") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class BroadcastOptions extends Pointer {
+ static { Loader.load(); }
+ /** Default native constructor. */
+ public BroadcastOptions() { super((Pointer)null); allocate(); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}. */
+ public BroadcastOptions(long size) { super((Pointer)null); allocateArray(size); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public BroadcastOptions(Pointer p) { super(p); }
+ private native void allocate();
+ private native void allocateArray(long size);
+ @Override public BroadcastOptions position(long position) {
+ return (BroadcastOptions)super.position(position);
+ }
+ @Override public BroadcastOptions getPointer(long i) {
+ return new BroadcastOptions((Pointer)this).offsetAddress(i);
+ }
+
+ public native @Cast("int64_t") long rootRank(); public native BroadcastOptions rootRank(long setter);
+ public native @Cast("int64_t") long rootTensor(); public native BroadcastOptions rootTensor(long setter);
+ public native @ByRef Milliseconds timeout(); public native BroadcastOptions timeout(Milliseconds setter);
+ public native @Cast("bool") boolean asyncOp(); public native BroadcastOptions asyncOp(boolean setter);
+}
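
The new BroadcastOptions expose c10d's broadcast knobs to Java: the rank and tensor index to broadcast from, a timeout expressed with the new chrono bindings, and an async flag. The following is a minimal usage sketch, not part of the generated sources; it assumes the pytorch preset's native libraries load on the current platform and that org.bytedeco.javacpp.chrono.Milliseconds offers a Milliseconds(long) constructor.

import org.bytedeco.javacpp.chrono.Milliseconds;
import org.bytedeco.pytorch.BroadcastOptions;

public class BroadcastOptionsSketch {
    public static void main(String[] args) {
        BroadcastOptions opts = new BroadcastOptions();
        opts.rootRank(0);                       // rank holding the source tensor
        opts.rootTensor(0);                     // index of the tensor to broadcast from
        opts.timeout(new Milliseconds(30000));  // assumed Milliseconds(long) constructor from the chrono module
        opts.asyncOp(true);                     // request an asynchronous collective
        System.out.println("rootRank=" + opts.rootRank() + " async=" + opts.asyncOp());
    }
}
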
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BucketAccumulator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BucketAccumulator.java
new file mode 100644
index 00000000000..98f4304e71c
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BucketAccumulator.java
@@ -0,0 +1,44 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+// Local accumulator type for a single bucket.
+@Namespace("c10d") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class BucketAccumulator extends Pointer {
+ static { Loader.load(); }
+ /** Default native constructor. */
+ public BucketAccumulator() { super((Pointer)null); allocate(); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}. */
+ public BucketAccumulator(long size) { super((Pointer)null); allocateArray(size); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public BucketAccumulator(Pointer p) { super(p); }
+ private native void allocate();
+ private native void allocateArray(long size);
+ @Override public BucketAccumulator position(long position) {
+ return (BucketAccumulator)super.position(position);
+ }
+ @Override public BucketAccumulator getPointer(long i) {
+ return new BucketAccumulator((Pointer)this).offsetAddress(i);
+ }
+
+ public native @ByRef @Cast("std::vector<size_t>*") SizeTVector indices(); public native BucketAccumulator indices(SizeTVector setter);
+ public native @Cast("size_t") long size(); public native BucketAccumulator size(long setter);
+ public native @Cast("size_t") long size_limit(); public native BucketAccumulator size_limit(long setter);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BufferPolicy.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BufferPolicy.java
index d7eacdbc360..ccf8cdb69e4 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BufferPolicy.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BufferPolicy.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinFunction.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinFunction.java
index 5f9bf340946..e405a3a8d2a 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinFunction.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinFunction.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinModule.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinModule.java
index 097921b400e..bb6afe3ba45 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinModule.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BuiltinModule.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -24,12 +25,12 @@ public class BuiltinModule extends SugaredValue {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public BuiltinModule(Pointer p) { super(p); }
- public BuiltinModule(@StdString BytePointer name, @ByVal(nullValue = "c10::optional<int64_t>(at::nullopt)") LongOptional version) { super((Pointer)null); allocate(name, version); }
- private native void allocate(@StdString BytePointer name, @ByVal(nullValue = "c10::optional<int64_t>(at::nullopt)") LongOptional version);
+ public BuiltinModule(@StdString BytePointer name, @ByVal(nullValue = "std::optional<int64_t>(at::nullopt)") LongOptional version) { super((Pointer)null); allocate(name, version); }
+ private native void allocate(@StdString BytePointer name, @ByVal(nullValue = "std::optional<int64_t>(at::nullopt)") LongOptional version);
public BuiltinModule(@StdString BytePointer name) { super((Pointer)null); allocate(name); }
private native void allocate(@StdString BytePointer name);
- public BuiltinModule(@StdString String name, @ByVal(nullValue = "c10::optional<int64_t>(at::nullopt)") LongOptional version) { super((Pointer)null); allocate(name, version); }
- private native void allocate(@StdString String name, @ByVal(nullValue = "c10::optional<int64_t>(at::nullopt)") LongOptional version);
+ public BuiltinModule(@StdString String name, @ByVal(nullValue = "std::optional<int64_t>(at::nullopt)") LongOptional version) { super((Pointer)null); allocate(name, version); }
+ private native void allocate(@StdString String name, @ByVal(nullValue = "std::optional<int64_t>(at::nullopt)") LongOptional version);
public BuiltinModule(@StdString String name) { super((Pointer)null); allocate(name); }
private native void allocate(@StdString String name);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ByteArrayRef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ByteArrayRef.java
index 49d4a670633..d8c26caea26 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ByteArrayRef.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ByteArrayRef.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ByteOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ByteOptional.java
index 829928771c5..0b3912ccbb4 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ByteOptional.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ByteOptional.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,10 +13,12 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
-@NoOffset @Name("c10::optional") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@NoOffset @Name("std::optional") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class ByteOptional extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerPair.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerPair.java
index ec56895cfd0..6efd0f0d546 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerPair.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerPair.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerPairOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerPairOptional.java
index 6d24b3b1f00..3e0d74da818 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerPairOptional.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerPairOptional.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,10 +13,12 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
-@NoOffset @Name("c10::optional >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@NoOffset @Name("std::optional >") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class BytePointerPairOptional extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerVector.java
index 5127b9f713c..e11ee212322 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerVector.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/BytePointerVector.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ByteVector.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ByteVector.java
new file mode 100644
index 00000000000..2012416254e
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ByteVector.java
@@ -0,0 +1,91 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+@Name("std::vector<uint8_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class ByteVector extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public ByteVector(Pointer p) { super(p); }
+ public ByteVector(byte value) { this(1); put(0, value); }
+ public ByteVector(byte ... array) { this(array.length); put(array); }
+ public ByteVector() { allocate(); }
+ public ByteVector(long n) { allocate(n); }
+ private native void allocate();
+ private native void allocate(@Cast("size_t") long n);
+ public native @Name("operator =") @ByRef ByteVector put(@ByRef ByteVector x);
+
+ public boolean empty() { return size() == 0; }
+ public native long size();
+ public void clear() { resize(0); }
+ public native void resize(@Cast("size_t") long n);
+
+ public byte front() { return get(0); }
+ public byte back() { return get(size() - 1); }
+ @Index(function = "at") public native @Cast("uint8_t") byte get(@Cast("size_t") long i);
+ public native ByteVector put(@Cast("size_t") long i, byte value);
+
+ public native @ByVal Iterator insert(@ByVal Iterator pos, @Cast("uint8_t") byte value);
+ public native @ByVal Iterator erase(@ByVal Iterator pos);
+ public native @ByVal Iterator begin();
+ public native @ByVal Iterator end();
+ @NoOffset @Name("iterator") public static class Iterator extends Pointer {
+ public Iterator(Pointer p) { super(p); }
+ public Iterator() { }
+
+ public native @Name("operator ++") @ByRef Iterator increment();
+ public native @Name("operator ==") boolean equals(@ByRef Iterator it);
+ public native @Name("operator *") @Cast("uint8_t") byte get();
+ }
+
+ public byte[] get() {
+ byte[] array = new byte[size() < Integer.MAX_VALUE ? (int)size() : Integer.MAX_VALUE];
+ for (int i = 0; i < array.length; i++) {
+ array[i] = get(i);
+ }
+ return array;
+ }
+ @Override public String toString() {
+ return java.util.Arrays.toString(get());
+ }
+
+ public byte pop_back() {
+ long size = size();
+ byte value = get(size - 1);
+ resize(size - 1);
+ return value;
+ }
+ public ByteVector push_back(byte value) {
+ long size = size();
+ resize(size + 1);
+ return put(size, value);
+ }
+ public ByteVector put(byte value) {
+ if (size() != 1) { resize(1); }
+ return put(0, value);
+ }
+ public ByteVector put(byte ... array) {
+ if (size() != array.length) { resize(array.length); }
+ for (int i = 0; i < array.length; i++) {
+ put(i, array[i]);
+ }
+ return this;
+ }
+}
+
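
ByteVector is a plain std::vector<uint8_t> adapter, so the sketch below only exercises methods declared in the file above (varargs construction, push_back, pop_back, size, toString). It is an illustration, not part of the generated sources, and assumes the native libraries are available.

import org.bytedeco.pytorch.ByteVector;

public class ByteVectorSketch {
    public static void main(String[] args) {
        ByteVector bytes = new ByteVector((byte) 1, (byte) 2, (byte) 3); // varargs constructor fills the native vector
        bytes.push_back((byte) 4);        // grow by one element
        byte last = bytes.pop_back();     // remove and return the trailing element
        System.out.println(bytes + " popped=" + last + " size=" + bytes.size());
    }
}
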
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/C10FlagParser.java b/pytorch/src/gen/java/org/bytedeco/pytorch/C10FlagParser.java
index 0175cf91c61..6f02e1ac17d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/C10FlagParser.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/C10FlagParser.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/C10dLogger.java b/pytorch/src/gen/java/org/bytedeco/pytorch/C10dLogger.java
new file mode 100644
index 00000000000..c2e20f8344c
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/C10dLogger.java
@@ -0,0 +1,36 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+@Namespace("c10d") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class C10dLogger extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public C10dLogger(Pointer p) { super(p); }
+
+ public C10dLogger(@Const @ByRef C10dLogger arg0) { super((Pointer)null); allocate(arg0); }
+ private native void allocate(@Const @ByRef C10dLogger arg0);
+
+ public native @ByRef @Name("operator =") C10dLogger put(@Const @ByRef C10dLogger arg0);
+
+ public native void log(@Const @ByRef C10dLoggingData data);
+ public static native C10dLogger getLogger();
+ public static native void registerLogger(@UniquePtr C10dLogger arg0);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/C10dLoggingData.java b/pytorch/src/gen/java/org/bytedeco/pytorch/C10dLoggingData.java
new file mode 100644
index 00000000000..774b3f77f76
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/C10dLoggingData.java
@@ -0,0 +1,47 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+// A generic logging data struct that holds different types of logging data,
+// starting with key-value pairs of strings and integers.
+// It can be extended to more types as needed.
+@Namespace("c10d") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class C10dLoggingData extends Pointer {
+ static { Loader.load(); }
+ /** Default native constructor. */
+ public C10dLoggingData() { super((Pointer)null); allocate(); }
+ /** Native array allocator. Access with {@link Pointer#position(long)}. */
+ public C10dLoggingData(long size) { super((Pointer)null); allocateArray(size); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public C10dLoggingData(Pointer p) { super(p); }
+ private native void allocate();
+ private native void allocateArray(long size);
+ @Override public C10dLoggingData position(long position) {
+ return (C10dLoggingData)super.position(position);
+ }
+ @Override public C10dLoggingData getPointer(long i) {
+ return new C10dLoggingData((Pointer)this).offsetAddress(i);
+ }
+
+ // logging fields that are string types.
+ public native @ByRef @NoOffset StringStringMap strings(); public native C10dLoggingData strings(StringStringMap setter);
+ // logging fields that are int64_t types.
+ public native @ByRef @NoOffset StringLongMap integers(); public native C10dLoggingData integers(StringLongMap setter);
+}
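
Together with C10dLogger above, this lets Java code emit structured c10d log records. The sketch below is hypothetical: it assumes the StringStringMap and StringLongMap adapters expose put(String, ...) overloads, and that a logger may or may not have been registered, so the result of getLogger() is null-checked.

import org.bytedeco.pytorch.C10dLogger;
import org.bytedeco.pytorch.C10dLoggingData;

public class C10dLoggingSketch {
    public static void main(String[] args) {
        C10dLoggingData data = new C10dLoggingData();
        data.strings().put("event", "broadcast_finished"); // string-valued field (assumed String overload)
        data.integers().put("world_size", 4L);             // int64-valued field (assumed String overload)
        C10dLogger logger = C10dLogger.getLogger();         // may wrap a null pointer if nothing was registered
        if (logger != null && !logger.isNull()) {
            logger.log(data);
        }
    }
}
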
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImpl.java
index 09ceda6e127..0fcdac0fa95 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -21,7 +22,7 @@
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CELU ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/** Applies celu over a given input.
- * See https://pytorch.org/docs/master/nn.html#torch.nn.CELU to learn
+ * See https://pytorch.org/docs/main/nn.html#torch.nn.CELU to learn
* about the exact behavior of this module.
*
* See the documentation for {@code torch::nn::CELUOptions} class to learn what
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplCloneable.java
index ca1bf9c15fd..c5755d11b03 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class CELUImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUOptions.java
index 433be6070af..3a8f7d874ee 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CELUOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CELUOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
// namespace functional
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CPUGeneratorImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CPUGeneratorImpl.java
index be3c5e63022..36feaf29019 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CPUGeneratorImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CPUGeneratorImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -38,7 +39,7 @@ public class CPUGeneratorImpl extends GeneratorImpl {
public native @Cast("uint64_t") long current_seed();
public native @Cast("uint64_t") long seed();
public native void set_state(@Const @ByRef TensorImpl new_state);
- public native @ByVal TensorImplPtr get_state();
+ public native @IntrusivePtr("c10::TensorImpl") @Cast({"", "c10::intrusive_ptr&"}) TensorImpl get_state();
public static native DeviceType device_type();
public native @Cast("uint32_t") int random();
public native @Cast("uint64_t") long random64();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImpl.java
index 72bfd641ff3..e46a1328190 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -21,7 +22,7 @@
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CTCLoss ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/** The Connectionist Temporal Classification loss.
- * See https://pytorch.org/docs/master/nn.html#torch.nn.CTCLoss to learn
+ * See https://pytorch.org/docs/main/nn.html#torch.nn.CTCLoss to learn
* about the exact behavior of this module.
*
* See the documentation for {@code torch::nn::CTCLossOptions} class to learn what
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplCloneable.java
index c15d39e20c2..a10426829c4 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class CTCLossImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossOptions.java
index 910b3ff8d97..e27d982ac77 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CTCLossOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
// namespace functional
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksArgs.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksArgs.java
index bda89287dc0..ff3aa24379c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksArgs.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksArgs.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksInterface.java
index 3663700ec5d..2ce28032170 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksInterface.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CUDAHooksInterface.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -76,6 +77,8 @@ public class CUDAHooksInterface extends AcceleratorHooksInterface {
public native @Cast("bool") boolean hasCuSOLVER();
+ public native @Cast("bool") boolean hasCuBLASLt();
+
public native @Cast("bool") boolean hasROCM();
public native @Cast("const at::cuda::NVRTC*") @ByRef Pointer nvrtc();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CacheKey.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CacheKey.java
new file mode 100644
index 00000000000..77dcaec0bcd
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CacheKey.java
@@ -0,0 +1,46 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+@Namespace("torch::dynamo::autograd") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class CacheKey extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public CacheKey(Pointer p) { super(p); }
+
+ // Key to find the next node in the shadow graph. We use C++ RTTI for the
+ // type of the node (ntype), then a key generated with a visitor pattern.
+ public CacheKey(@ByRef @Cast("std::type_index*") Pointer ntype, @Cast("const uint8_t*") BytePointer key, @Cast("uint16_t") short len) { super((Pointer)null); allocate(ntype, key, len); }
+ private native void allocate(@ByRef @Cast("std::type_index*") Pointer ntype, @Cast("const uint8_t*") BytePointer key, @Cast("uint16_t") short len);
+ public CacheKey(@ByRef @Cast("std::type_index*") Pointer ntype, @Cast("const uint8_t*") ByteBuffer key, @Cast("uint16_t") short len) { super((Pointer)null); allocate(ntype, key, len); }
+ private native void allocate(@ByRef @Cast("std::type_index*") Pointer ntype, @Cast("const uint8_t*") ByteBuffer key, @Cast("uint16_t") short len);
+ public CacheKey(@ByRef @Cast("std::type_index*") Pointer ntype, @Cast("const uint8_t*") byte[] key, @Cast("uint16_t") short len) { super((Pointer)null); allocate(ntype, key, len); }
+ private native void allocate(@ByRef @Cast("std::type_index*") Pointer ntype, @Cast("const uint8_t*") byte[] key, @Cast("uint16_t") short len);
+
+ public native @Cast("bool") @Name("operator <") boolean lessThan(@Const @ByRef CacheKey other);
+
+ public native @Cast("bool") @Name("operator ==") boolean equals(@Const @ByRef CacheKey other);
+
+ public native @Cast("size_t") long hash();
+
+ public native @ByRef @Cast("std::type_index*") Pointer node_type(); public native CacheKey node_type(Pointer setter);
+ public native @Cast("uint16_t") short key_size(); public native CacheKey key_size(short setter);
+ public native @Cast("const uint8_t*") BytePointer key(); public native CacheKey key(BytePointer setter);
+}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CacheKeyBuffer.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CacheKeyBuffer.java
new file mode 100644
index 00000000000..1f75d286956
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CacheKeyBuffer.java
@@ -0,0 +1,35 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+@Namespace("torch::dynamo::autograd") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class CacheKeyBuffer extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public CacheKeyBuffer(Pointer p) { super(p); }
+
+ public CacheKeyBuffer(@Cast("const uint8_t*") BytePointer key, @Cast("uint16_t") short len) { super((Pointer)null); allocate(key, len); }
+ private native void allocate(@Cast("const uint8_t*") BytePointer key, @Cast("uint16_t") short len);
+ public CacheKeyBuffer(@Cast("const uint8_t*") ByteBuffer key, @Cast("uint16_t") short len) { super((Pointer)null); allocate(key, len); }
+ private native void allocate(@Cast("const uint8_t*") ByteBuffer key, @Cast("uint16_t") short len);
+ public CacheKeyBuffer(@Cast("const uint8_t*") byte[] key, @Cast("uint16_t") short len) { super((Pointer)null); allocate(key, len); }
+ private native void allocate(@Cast("const uint8_t*") byte[] key, @Cast("uint16_t") short len);
+ public native @Cast("const uint8_t*") BytePointer get();
+}
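
CacheKeyBuffer just keeps a copy of a key's bytes for the compiled-autograd cache. A minimal sketch, using only the constructors and accessor declared above (none of the surrounding dynamo machinery is involved):

import org.bytedeco.javacpp.BytePointer;
import org.bytedeco.pytorch.CacheKeyBuffer;

public class CacheKeyBufferSketch {
    public static void main(String[] args) {
        byte[] key = {0x10, 0x20, 0x30};
        CacheKeyBuffer buffer = new CacheKeyBuffer(key, (short) key.length); // copies the key into native memory
        BytePointer stored = buffer.get();                                   // pointer to the stored bytes
        System.out.println("first byte = " + stored.get(0));
    }
}
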
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Call.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Call.java
index 4b0f13710ad..5d7f1eb2d34 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Call.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Call.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CapsuleType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CapsuleType.java
index 6cdc616619f..e3bd8091f70 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CapsuleType.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CapsuleType.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CapsuleTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CapsuleTypePtr.java
index 02c266c4cce..072b05c45f4 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CapsuleTypePtr.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CapsuleTypePtr.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CastValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CastValue.java
index 485082ee80a..4164b986f9c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CastValue.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CastValue.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchDataset.java
index 4bd639f1d2a..3756cba2e7b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchDataset.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,10 +13,12 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
-@Name("torch::data::datasets::BatchDataset,c10::optional,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@Name("torch::data::datasets::BatchDataset,std::optional,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class ChunkBatchDataset extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedBatchDataset.java
index 372fe258744..225630df0ab 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedBatchDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedBatchDataset.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,10 +13,12 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
-@Name("torch::data::datasets::BatchDataset >,c10::optional,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@Name("torch::data::datasets::BatchDataset >,std::optional,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class ChunkBatchSharedBatchDataset extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedTensorBatchDataset.java
index ceed73e0d0f..25f7ef669d2 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedTensorBatchDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkBatchSharedTensorBatchDataset.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,10 +13,12 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
-@Name("torch::data::datasets::BatchDataset >,c10::optional,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@Name("torch::data::datasets::BatchDataset >,std::optional,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class ChunkBatchSharedTensorBatchDataset extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataReader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataReader.java
index a95b3f7fbf2..0dbed3a8e6e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataReader.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataReader.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataset.java
index 25de5e1c011..3b20daf8f56 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDataset.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDatasetOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDatasetOptions.java
index c7c7ea60241..b0c4ba70fc2 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDatasetOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkDatasetOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
// namespace detail
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java
index d90c8378d8b..3a8b2770d2f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapBatchDataset.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapDataset.java
index 76e2b21ce91..c0940c290dd 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapDataset.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorBatchDataset.java
index 191ed20c628..b0e5062563b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorBatchDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorBatchDataset.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorDataset.java
index 11471071c9c..ca3f190f057 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkMapTensorDataset.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoader.java
index eff35875117..d157ad453ba 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoader.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoader.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoaderBase.java
index d2b3dd7d98e..0727473c78b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoaderBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomDataLoaderBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoader.java
index 316fa2a369e..d584a2731a6 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoader.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoader.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoaderBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoaderBase.java
index 771c1076a83..17b5fe6767c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoaderBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRandomTensorDataLoaderBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRecordIterator.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRecordIterator.java
index aa22654d8d7..33833dead5f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRecordIterator.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkRecordIterator.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedBatchDataset.java
index 098e8c2c591..d3875745ed8 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedBatchDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedBatchDataset.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedTensorBatchDataset.java
index 1daed38af62..ca2d5b80381 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedTensorBatchDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkSharedTensorBatchDataset.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulDataset.java
index 48649299a16..52e656ff8fd 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulDataset.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulTensorDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulTensorDataset.java
index d40d1dc2c25..3473d534e49 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulTensorDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkStatefulTensorDataset.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorBatchDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorBatchDataset.java
index c9c16904e1b..81fdaf27545 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorBatchDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorBatchDataset.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,10 +13,12 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
-@Name("torch::data::datasets::BatchDataset,c10::optional,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@Name("torch::data::datasets::BatchDataset,std::optional,size_t>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class ChunkTensorBatchDataset extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataReader.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataReader.java
index 3f165de5493..c354fa645db 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataReader.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataReader.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataset.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataset.java
index bb02ac9f39a..668f11d5253 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataset.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ChunkTensorDataset.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassAttribute.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassAttribute.java
index 245e1e947ff..859180c2f12 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassAttribute.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassAttribute.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassDef.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassDef.java
index 4f6e30671a4..8deafc600b6 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassDef.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassDef.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -24,8 +25,8 @@ public class ClassDef extends TreeView {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public ClassDef(Pointer p) { super(p); }
- public ClassDef(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); }
- private native void allocate(@Const @ByRef TreeRef tree);
+ public ClassDef(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree) { super((Pointer)null); allocate(tree); }
+ private native void allocate(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree);
public native @ByVal ClassDef withName(@StdString BytePointer new_name);
public native @ByVal ClassDef withName(@StdString String new_name);
public native @ByVal Ident name();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassType.java
index 8a19ca8e0ee..4118b5e9e37 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassType.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassType.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -50,6 +51,21 @@ public static class Property extends Pointer {
}
// Create a class type with name `name` and its methods stored in `cu`.
+ public static native @SharedPtr("c10::ClassType") @ByVal ClassType create(
+ @ByVal QualifiedNameOptional qualifiedName,
+ @WeakPtr("torch::jit::CompilationUnit") @ByVal CompilationUnit cu,
+ @Cast("bool") boolean is_module/*=false*/,
+ @StdString BytePointer doc_string/*=""*/,
+ @ByVal(nullValue = "std::vector<std::string>{}") StringVector unresolved_class_attributes);
+ public static native @SharedPtr("c10::ClassType") @ByVal ClassType create(
+ @ByVal QualifiedNameOptional qualifiedName,
+ @WeakPtr("torch::jit::CompilationUnit") @ByVal CompilationUnit cu);
+ public static native @SharedPtr("c10::ClassType") @ByVal ClassType create(
+ @ByVal QualifiedNameOptional qualifiedName,
+ @WeakPtr("torch::jit::CompilationUnit") @ByVal CompilationUnit cu,
+ @Cast("bool") boolean is_module/*=false*/,
+ @StdString String doc_string/*=""*/,
+ @ByVal(nullValue = "std::vector<std::string>{}") StringVector unresolved_class_attributes);
public native @Cast("bool") boolean equals(@Const @ByRef Type rhs);
@@ -243,7 +259,7 @@ public native void checkForwardHookSchema(
public native void unsafeRemoveMethod(@StdString BytePointer name);
public native void unsafeRemoveMethod(@StdString String name);
- public native @SharedPtr CompilationUnit compilation_unit();
+ public native @SharedPtr("torch::jit::CompilationUnit") @ByVal CompilationUnit compilation_unit();
// generate a refined version of this class.
// It has the same name but the slot Types are subtypes of
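The new ClassType.create bindings above mirror c10::ClassType::create. A minimal, hypothetical sketch of the two-argument overload, assuming the default CompilationUnit constructor and the optional's value constructor are bound as in previous releases (all names below are illustrative):

    // Hypothetical: declare a fresh class type whose methods will be stored in `cu`.
    CompilationUnit cu = new CompilationUnit();
    QualifiedNameOptional name = new QualifiedNameOptional(new QualifiedName("__torch__.MyClass"));
    ClassType cls = ClassType.create(name, cu);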
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassTypePropertyOptional.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassTypePropertyOptional.java
index 0f6ee68e172..7223c08dae0 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassTypePropertyOptional.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassTypePropertyOptional.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,10 +13,12 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
-@NoOffset @Name("c10::optional<c10::ClassType::Property>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+@NoOffset @Name("std::optional<c10::ClassType::Property>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class ClassTypePropertyOptional extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassValue.java
index b740cf551de..c006d9cc409 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ClassValue.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ClassValue.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ClosureValue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ClosureValue.java
index 8a2c916eebd..2025e194e4e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ClosureValue.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ClosureValue.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Code.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Code.java
index b779f88056b..2fbec6db4b2 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Code.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Code.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CodeImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CodeImpl.java
index 56fe902a48c..da45fe4515b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CodeImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CodeImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CommHookInterface.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CommHookInterface.java
new file mode 100644
index 00000000000..cea0004a55d
--- /dev/null
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CommHookInterface.java
@@ -0,0 +1,43 @@
+// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
+
+package org.bytedeco.pytorch;
+
+import org.bytedeco.pytorch.Allocator;
+import org.bytedeco.pytorch.Function;
+import org.bytedeco.pytorch.Module;
+import org.bytedeco.javacpp.annotation.Cast;
+import java.nio.*;
+import org.bytedeco.javacpp.*;
+import org.bytedeco.javacpp.annotation.*;
+
+import static org.bytedeco.javacpp.presets.javacpp.*;
+import static org.bytedeco.openblas.global.openblas_nolapack.*;
+import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
+
+import static org.bytedeco.pytorch.global.torch.*;
+
+
+// Base class of both `PythonCommHook` and `CppCommHook`.
+// Requires implementing 1) `runHook` method that communicates gradients
+// asynchronously, and 2) `parseHookResult` method that converts the hook
+// result into a tensor.
+@Namespace("c10d") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+public class CommHookInterface extends Pointer {
+ static { Loader.load(); }
+ /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
+ public CommHookInterface(Pointer p) { super(p); }
+
+
+ // Passes the input grad bucket to the registered communication hook.
+ // Once the tensors in the bucket are ready, kicks off the hook asynchronously
+ // and returns a future that holds the communication results.
+ public native @IntrusivePtr("c10::ivalue::Future") @Cast({"", "c10::intrusive_ptr&"}) Future runHook(
+ @ByRef GradBucket bucket);
+
+ // Returns the resulting tensor once the communication hook result is
+ // ready. The resulting tensor will then be copied to the grads of
+ // individual parameters.
+ public native @ByVal Tensor parseHookResult(@Const @ByRef IValue result);
+}
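The new CommHookInterface class exposes only the two virtuals shown above. A hedged sketch of the call pattern they imply, assuming a concrete hook and a GradBucket are obtained from an existing c10d reducer setup (both variables are hypothetical):

    Future fut = hook.runHook(bucket);              // launches the communication asynchronously
    IValue result = fut.value();                    // read the hook result once the future has completed
    Tensor reduced = hook.parseHookResult(result);  // this tensor is later copied into the parameter grads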
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnit.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnit.java
index 80bfa8cea8e..954bb5cc311 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnit.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CompilationUnit.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -71,7 +72,7 @@ public enum FunctionType { Method(0), Hook(1), PreHook(2);
@Const @ByRef ResolverVector defResolvers,
@Const Self self,
@Cast("bool") boolean shouldMangle/*=false*/,
- @ByVal(nullValue = "c10::optional<size_t>(c10::nullopt)") SizeTOptional operator_set_version);
+ @ByVal(nullValue = "std::optional<size_t>(c10::nullopt)") SizeTOptional operator_set_version);
public native @ByVal FunctionVector define(
@Const @ByRef QualifiedNameOptional prefix,
@Const @ByRef PropertyVector properties,
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CompileTimeEmptyString.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CompileTimeEmptyString.java
index 3b493e6d8cb..3f07592738f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CompileTimeEmptyString.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CompileTimeEmptyString.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/CompiledNodeArgs.java b/pytorch/src/gen/java/org/bytedeco/pytorch/CompiledNodeArgs.java
index 778fdee0f15..1e6ede7a4dd 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/CompiledNodeArgs.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/CompiledNodeArgs.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,13 +13,77 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
-@Namespace("torch::dynamo::autograd") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
+
+@Namespace("torch::dynamo::autograd") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class CompiledNodeArgs extends Pointer {
- /** Empty constructor. Calls {@code super((Pointer)null)}. */
- public CompiledNodeArgs() { super((Pointer)null); }
+ static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public CompiledNodeArgs(Pointer p) { super(p); }
+
+ public native void collect(@Const @ByRef DynamoTensorArg t);
+
+ public native void collect(@Const @ByRef Tensor t);
+ public native void collect(@Const @ByRef SymInt t);
+ public native void collect(@Const @ByRef IValue iv);
+ public native void collect(@Const @ByRef Scalar t);
+ public native void collect(@Const @ByRef TensorOptions t);
+ public native void collect(@Const @ByRef TensorGeometry t);
+ public native void collect(@Const @ByRef Device t);
+ public native void collect(@StdString BytePointer t);
+ public native void collect(@StdString String t);
+ public native void collect(@Const @ByRef TypeMeta t);
+ public native void collect(@Cast({"", "const std::shared_ptr<torch::autograd::Node>"}) @SharedPtr Node t);
+ public native void collect(@Const @ByRef NodeCall t);
+ public native void collect(@Const @ByRef Edge t);
+ public native void collect(@Const @ByRef VariableInfo t);
+ public native @Cast("bool") boolean cond(@Cast("bool") boolean cond);
+
+// #define COLLECT_AS_BYTES(T)
+// void collect(T t) {
+// specialize_on_bytes(t);
+// }
+ public native void collect(ScalarType t);
+ public native void collect(DeviceType t);
+ public native void collect(@Cast("c10::DeviceType") byte t);
+ public native void collect(Layout t);
+ public native void collect(MemoryFormat t);
+ public native void collect(short t);
+ public native void collect(int t);
+ public native void collect(@Cast("int64_t") long t);
+ public native void collect(@Cast("bool") boolean t);
+ public native void collect(float t);
+ public native void collect(double t);
+// #undef COLLECT_AS_BYTES
+
+ public native void collect_hooks_from(Node fn);
+
+ public native @ByVal CacheKey key();
+
+ public native @Cast("size_t") long add_backward(@ByRef(true) SafePyObject obj);
+
+ public native @Cast("size_t") long add_backward_state(@ByRef(true) SafePyObject obj);
+
+ public native void add_tensor_pre_hook(@ByRef(true) SafePyObject obj, int index);
+
+ public native void add_pre_hook(@ByRef(true) SafePyObject obj);
+
+ public native void add_post_hook(@ByRef(true) SafePyObject obj);
+
+ public native void add_post_acc_grad_hook(@ByRef(true) SafePyObject obj);
+
+ // Need to template the size_t to silence internal 32-bit build errors due to
+ // a mix of -Werror, -Wtautological-type-limit-compare and
+ // -Wunknown-pragmas
+
+ public native SizeInput.DynType set_default_dyn_type(SizeInput.DynType default_dyn_type);
+ public native @Cast("torch::dynamo::autograd::SizeInput::DynType") byte set_default_dyn_type(@Cast("torch::dynamo::autograd::SizeInput::DynType") byte default_dyn_type);
+
+ public CompiledNodeArgs(@ByRef AutogradCompilerCall compiler, @ByRef NodeCall node_call) { super((Pointer)null); allocate(compiler, node_call); }
+ private native void allocate(@ByRef AutogradCompilerCall compiler, @ByRef NodeCall node_call);
+
}
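CompiledNodeArgs switches here from an opaque placeholder to a full binding; what it collects appears to feed the CacheKey used by compiled autograd. A sketch under the assumption that an AutogradCompilerCall and a NodeCall are already available from the compiled-autograd internals (both are hypothetical names):

    CompiledNodeArgs args = new CompiledNodeArgs(compilerCall, nodeCall);
    args.collect(someTensor);        // fold tensor metadata into the cache key
    args.collect(true);              // scalars are specialized on by value
    CacheKey key = args.key();       // key under which the compiled graph is cached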
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexType.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexType.java
index 2b4228b06da..ef122442d94 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexType.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexType.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexTypePtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexTypePtr.java
index 40343406921..a8f92c42f3f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexTypePtr.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ComplexTypePtr.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Compound.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Compound.java
index 097c118f750..84cedd936d1 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Compound.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Compound.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -29,7 +30,7 @@ public class Compound extends Tree {
public Compound(int kind, @Const @ByRef SourceRange range_, @Cast("torch::jit::TreeList*") @ByRef(true) SymDimVector trees_) { super((Pointer)null); allocate(kind, range_, trees_); }
private native void allocate(int kind, @Const @ByRef SourceRange range_, @Cast("torch::jit::TreeList*") @ByRef(true) SymDimVector trees_);
public native @Cast("const torch::jit::TreeList*") @ByRef SymDimVector trees();
- public static native @ByVal TreeRef create(
+ public static native @IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree create(
int kind,
@Const @ByRef SourceRange range_,
@Cast("torch::jit::TreeList*") @ByRef(true) SymDimVector trees_);
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstExpr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstExpr.java
index 9f15c3265af..13ec4184bca 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstExpr.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstExpr.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -24,8 +25,8 @@ public class ConstExpr extends Expr {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public ConstExpr(Pointer p) { super(p); }
- public ConstExpr(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); }
- private native void allocate(@Const @ByRef TreeRef tree);
+ public ConstExpr(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree) { super((Pointer)null); allocate(tree); }
+ private native void allocate(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree);
public native @Cast("bool") boolean isFloatingPoint();
public native @Cast("bool") boolean isIntegral();
public native @Cast("bool") boolean isComplex();
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImpl.java
index c756a5be903..aedd9ad6744 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -21,7 +22,7 @@
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ConstantPad1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/** Applies ConstantPad over a 1-D input.
- * See https://pytorch.org/docs/master/nn.html#torch.nn.ConstantPad1d to learn
+ * See https://pytorch.org/docs/main/nn.html#torch.nn.ConstantPad1d to learn
* about the exact behavior of this module.
*
* See the documentation for {@code torch::nn::ConstantPad1dOptions} class to learn
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplBase.java
index ba3b49901cd..9cf6889f720 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplCloneable.java
index eb4a8ed8c7b..6879bed85d7 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class ConstantPad1dImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional<torch::Device>(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional<torch::Device>(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
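Only the default value of the device argument changes in these clone() signatures (c10::optional to std::optional); calling clone from Java stays the same. A small sketch, assuming a module m built elsewhere and that DeviceOptional keeps its value constructor:

    Module onGpu = m.clone(new DeviceOptional(new Device("cuda:0")));  // deep copy placed on CUDA
    Module copy = m.clone();                                           // no device: parameters stay where they are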
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dOptions.java
index df804685b72..bd6ff48021f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad1dOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImpl.java
index 120ff47601e..c7335c30896 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -21,7 +22,7 @@
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ConstantPad2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/** Applies ConstantPad over a 2-D input.
- * See https://pytorch.org/docs/master/nn.html#torch.nn.ConstantPad2d to learn
+ * See https://pytorch.org/docs/main/nn.html#torch.nn.ConstantPad2d to learn
* about the exact behavior of this module.
*
* See the documentation for {@code torch::nn::ConstantPad2dOptions} class to learn
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplBase.java
index 84517d5fcb2..45781ef4327 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplCloneable.java
index 81323b5e5c0..34d9fde521d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class ConstantPad2dImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional<torch::Device>(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional<torch::Device>(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dOptions.java
index e6c3d2da67e..32db42c471c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad2dOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImpl.java
index 34fa4bda9f6..d9b6ff395c6 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -21,7 +22,7 @@
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ConstantPad3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/** Applies ConstantPad over a 3-D input.
- * See https://pytorch.org/docs/master/nn.html#torch.nn.ConstantPad3d to learn
+ * See https://pytorch.org/docs/main/nn.html#torch.nn.ConstantPad3d to learn
* about the exact behavior of this module.
*
* See the documentation for {@code torch::nn::ConstantPad3dOptions} class to learn
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplBase.java
index df7449f8b7d..4fff865b8b0 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplCloneable.java
index 18409adf29e..35b21d86868 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class ConstantPad3dImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional<torch::Device>(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional<torch::Device>(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dOptions.java
index cadeea7485d..f17b82fcfaa 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantPad3dOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantString.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantString.java
index 74e14d15f19..1e35d725874 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantString.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantString.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -26,11 +27,11 @@ public class ConstantString extends Pointer {
public ConstantString(Pointer p) { super(p); }
public ConstantString(@StdString BytePointer str) { super((Pointer)null); allocate(str); }
- private native void allocate(@StdString BytePointer str);
+ @IntrusivePtr @Name("c10::make_intrusive<c10::ivalue::ConstantString>") private native void allocate(@StdString BytePointer str);
public ConstantString(@StdString String str) { super((Pointer)null); allocate(str); }
- private native void allocate(@StdString String str);
- public static native @ByVal ConstantStringPtr create(@StdString BytePointer str_);
- public static native @ByVal ConstantStringPtr create(@StdString String str_);
+ @IntrusivePtr @Name("c10::make_intrusive<c10::ivalue::ConstantString>") private native void allocate(@StdString String str);
+ public static native @IntrusivePtr("c10::ivalue::ConstantString") @Cast({"", "c10::intrusive_ptr&"}) ConstantString create(@StdString BytePointer str_);
+ public static native @IntrusivePtr("c10::ivalue::ConstantString") @Cast({"", "c10::intrusive_ptr&"}) ConstantString create(@StdString String str_);
public native @StdString BytePointer string();
public native @StringView BytePointer string_view();
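With ConstantStringPtr gone (see the deleted file below), create() now hands back the ConstantString itself, held through an intrusive_ptr. A short before/after sketch:

    // 2.3.x: ConstantStringPtr p = ConstantString.create("hello"); ConstantString s = p.get();
    ConstantString s = ConstantString.create("hello");   // 2.4.x: the intrusive_ptr is implicit
    BytePointer text = s.string();                        // -> "hello"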
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantStringPtr.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantStringPtr.java
deleted file mode 100644
index 5953da42afc..00000000000
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConstantStringPtr.java
+++ /dev/null
@@ -1,154 +0,0 @@
-// Targeted by JavaCPP version 1.5.11-SNAPSHOT: DO NOT EDIT THIS FILE
-
-package org.bytedeco.pytorch;
-
-import org.bytedeco.pytorch.Allocator;
-import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
-import org.bytedeco.pytorch.Module;
-import org.bytedeco.javacpp.annotation.Cast;
-import java.nio.*;
-import org.bytedeco.javacpp.*;
-import org.bytedeco.javacpp.annotation.*;
-
-import static org.bytedeco.javacpp.presets.javacpp.*;
-import static org.bytedeco.openblas.global.openblas_nolapack.*;
-import static org.bytedeco.openblas.global.openblas.*;
-
-import static org.bytedeco.pytorch.global.torch.*;
-
-
-@Name("c10::intrusive_ptr<c10::ivalue::ConstantString>") @NoOffset @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
-public class ConstantStringPtr extends Pointer {
- static { Loader.load(); }
- /** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
- public ConstantStringPtr(Pointer p) { super(p); }
- /** Native array allocator. Access with {@link Pointer#position(long)}. */
- public ConstantStringPtr(long size) { super((Pointer)null); allocateArray(size); }
- private native void allocateArray(long size);
- @Override public ConstantStringPtr position(long position) {
- return (ConstantStringPtr)super.position(position);
- }
- @Override public ConstantStringPtr getPointer(long i) {
- return new ConstantStringPtr((Pointer)this).offsetAddress(i);
- }
-
-
- public ConstantStringPtr() { super((Pointer)null); allocate(); }
- @NoException(true) private native void allocate();
-
- public ConstantStringPtr(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0) { super((Pointer)null); allocate(arg0); }
- @NoException(true) private native void allocate(@ByVal @Cast("std::nullptr_t*") PointerPointer arg0);
-
- // This constructor will not increase the ref counter for you.
- // We use the tagged dispatch mechanism to explicitly mark this constructor
- // to not increase the refcount
- public ConstantStringPtr(ConstantString target, @ByVal DontIncreaseRefcount arg1) { super((Pointer)null); allocate(target, arg1); }
- @NoException(true) private native void allocate(ConstantString target, @ByVal DontIncreaseRefcount arg1);
-
-
-
- public ConstantStringPtr(@ByRef(true) ConstantStringPtr rhs) { super((Pointer)null); allocate(rhs); }
- @NoException(true) private native void allocate(@ByRef(true) ConstantStringPtr rhs);
-
- public native @ByRef @Name("operator =") @NoException(true) ConstantStringPtr put(@ByRef(true) ConstantStringPtr rhs);
-
- // Assignment is implemented using copy and swap. That's safe for self
- // assignment.
- // NOLINTNEXTLINE(bugprone-unhandled-self-assignment)
-
- public native @NoException(true) ConstantString get();
-
- public native @ByRef @Name("operator *") @NoException(true) ConstantString multiply();
-
- public native @Name("operator ->") @NoException(true) ConstantString access();
-
- public native @Cast("bool") @Name("operator bool") @NoException(true) boolean asBoolean();
-
- public native @NoException(true) void reset();
-
- public native @NoException(true) void swap(@ByRef ConstantStringPtr rhs);
-
- // We do a lot of null-pointer checks in our code, good to have this be cheap.
- public native @Cast("bool") @NoException(true) boolean defined();
-
- public native @Cast("uint32_t") @NoException(true) int use_count();
-
- public native @Cast("uint32_t") @NoException(true) int weak_use_count();
-
- public native @Cast("bool") @NoException(true) boolean unique();
-
- /**
- * Returns an owning (!) pointer to the underlying object and makes the
- * intrusive_ptr instance invalid. That means the refcount is not decreased.
- * You *must* put the returned pointer back into a intrusive_ptr using
- * intrusive_ptr::reclaim(ptr) to properly destruct it.
- * This is helpful for C APIs.
- */
- public native @NoException(true) ConstantString release();
-
- /**
- * Takes an owning pointer to TTarget* and creates an intrusive_ptr that takes
- * over ownership. That means the refcount is not increased.
- * This is the counter-part to intrusive_ptr::release() and the pointer
- * passed in *must* have been created using intrusive_ptr::release().
- */
- public static native @ByVal ConstantStringPtr reclaim(ConstantString owning_ptr);
-
- /**
- * Takes an owning pointer to TTarget* and creates an intrusive_ptr
- * representing a new reference, i.e. the raw pointer retains
- * ownership.
- */
- public static native @ByVal ConstantStringPtr reclaim_copy(ConstantString owning_ptr);
-
- /**
- * Allocate a heap object with args and wrap it inside a intrusive_ptr and
- * incref. This is a helper function to let make_intrusive() access private
- * intrusive_ptr constructors.
- */
-
- /**
- * Turn a new instance of TTarget (e.g., literally allocated
- * using new TTarget(...) into an intrusive_ptr. If possible,
- * use intrusive_ptr::make instead which statically guarantees
- * that the allocation was done properly.
- *
- * At the moment, the only reason this method exists is because
- * pybind11 holder types expect to be able to allocate in
- * this way (because pybind11 handles the new allocation itself).
- */
- public static native @ByVal ConstantStringPtr unsafe_steal_from_new(ConstantString raw_ptr);
-
- /**
- * Turn an instance of TTarget that should not be reference counted
- * (e.g., allocated into an arena with placement new) into an
- * intrusive_ptr. This is gratuitously unsafe and should only be
- * used if you can guarantee that the pointer will not escape and be
- * refcounted as normal.
- *
- * {@code expected_decrefs} is a debugging parameter: it indicates the
- * number of strong owners the intrusive_ptr_target in question is
- * expected to get. In most use cases, this will likely be 1.
- *
- * The reason this method exists is for manually sharing
- * StorageImpls across Tensors in the static runtime. It needs
- * access to private intrusive_ptr members so that the refcounts can
- * be initialized to custom values.
- */
- public static native @ByVal ConstantStringPtr unsafe_adapt_non_heap_allocated(
- ConstantString raw_ptr,
- @Cast("uint32_t") int expected_decrefs);
-
- /**
- * Turn a **non-owning raw pointer** to an intrusive_ptr. It is
- * the moral equivalent of enable_shared_from_this on a shared pointer.
- *
- * This method is only valid for objects that are already live. If
- * you are looking for the moral equivalent of unique_ptr(T*)
- * constructor, see steal_from_new.
- *
- * TODO: https://github.com/pytorch/pytorch/issues/56482
- */
- public static native @ByVal ConstantStringPtr unsafe_reclaim_from_nonowning(ConstantString raw_ptr);
-}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java
index 0a261ba302a..148e6fd68be 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Context.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -38,7 +39,7 @@ public class Context extends Pointer {
public native @Const @ByRef Generator defaultGenerator(@ByVal Device device);
public native @Const @ByRef AcceleratorHooksInterface getAcceleratorHooksInterface(
- @ByVal(nullValue = "c10::optional<c10::DeviceType>(c10::nullopt)") DeviceTypeOptional opt_device_type);
+ @ByVal(nullValue = "std::optional<c10::DeviceType>(c10::nullopt)") DeviceTypeOptional opt_device_type);
public native @Const @ByRef AcceleratorHooksInterface getAcceleratorHooksInterface();
public native @ByVal Device getDeviceFromPtr(Pointer data, DeviceType device_type);
public native @ByVal Device getDeviceFromPtr(Pointer data, @Cast("c10::DeviceType") byte device_type);
@@ -55,18 +56,20 @@ public class Context extends Pointer {
public static native @Cast("bool") boolean hasCuDNN();
public static native long versionCuDNN();
public static native @Cast("bool") boolean hasCuSOLVER();
+ public static native @Cast("bool") boolean hasCuBLASLt();
public static native @Cast("bool") boolean hasHIP();
public static native @Cast("bool") boolean hasMPS();
public static native @Cast("bool") boolean hasIPU();
public static native @Cast("bool") boolean hasXLA();
public static native @Cast("bool") boolean hasXPU();
public static native @Cast("bool") boolean hasLazy();
- public static native @Cast("bool") boolean hasORT();
+ public static native @Cast("bool") boolean hasMAIA();
// defined in header so that getNonVariableType has ability to inline
// call_once check. getNonVariableType is called fairly frequently
public native void lazyInitCUDA();
public native void lazyInitHIP();
public native void lazyInitXPU();
+ public native void lazyInitMTIA();
public native void lazyInitPrivateUse1();
public static native @Cast("const at::cuda::NVRTC*") @ByRef Pointer getNVRTC();
@@ -116,6 +119,10 @@ public class Context extends Pointer {
public native void setLinalgPreferredBackend(LinalgBackend arg0);
public native void setLinalgPreferredBackend(@Cast("at::LinalgBackend") byte arg0);
+ public native BlasBackend blasPreferredBackend();
+ public native void setBlasPreferredBackend(BlasBackend arg0);
+ public native void setBlasPreferredBackend(@Cast("at::BlasBackend") byte arg0);
+
// Note [Enabling Deterministic Operations]
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
// Operations in PyTorch that normally act nondeterministically, but have an
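The Context additions (cuBLASLt detection, MAIA replacing ORT, MTIA lazy init, and a preferred BLAS backend) are all reachable from the global context; a hedged sketch, assuming globalContext() remains exposed in org.bytedeco.pytorch.global.torch as in earlier releases:

    Context ctx = globalContext();
    boolean lt = Context.hasCuBLASLt();            // new static capability probe
    BlasBackend blas = ctx.blasPreferredBackend(); // query, or change it via setBlasPreferredBackend(...)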
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Continue.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Continue.java
index 0a47ef3e3db..c3482512ca1 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Continue.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Continue.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -24,7 +25,7 @@ public class Continue extends Stmt {
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public Continue(Pointer p) { super(p); }
- public Continue(@Const @ByRef TreeRef tree) { super((Pointer)null); allocate(tree); }
- private native void allocate(@Const @ByRef TreeRef tree);
+ public Continue(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree) { super((Pointer)null); allocate(tree); }
+ private native void allocate(@IntrusivePtr("torch::jit::Tree") @Cast({"", "c10::intrusive_ptr&"}) Tree tree);
public static native @ByVal Continue create(@Const @ByRef SourceRange range);
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dFuncOptions.java
index f0ab894ff77..1c1b422df7c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dFuncOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dFuncOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImpl.java
index 9598c469633..5706051a968 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -21,7 +22,7 @@
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Conv1d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/** Applies convolution over a 1-D input.
- * See https://pytorch.org/docs/master/nn.html#torch.nn.Conv1d to learn about
+ * See https://pytorch.org/docs/main/nn.html#torch.nn.Conv1d to learn about
* the exact behavior of this module.
*
* See the documentation for {@code torch::nn::Conv1dOptions} class to learn what
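For orientation, here is a minimal Java sketch of using the Conv1d module wrapped by this file. The `Conv1dOptions(in_channels, out_channels, kernel_size)` constructor shape, the `LongPointer` mapping of the kernel size, and the `randn(long...)` factory are assumptions about the generated bindings, not content of this diff.

```java
import org.bytedeco.javacpp.LongPointer;
import org.bytedeco.pytorch.Conv1dImpl;
import org.bytedeco.pytorch.Conv1dOptions;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class Conv1dSketch {
    public static void main(String[] args) {
        // Assumption: the options constructor mirrors torch::nn::Conv1dOptions(in_channels, out_channels, kernel_size),
        // with the ExpandingArray<1> kernel size mapped to a LongPointer by the code generator.
        Conv1dOptions options = new Conv1dOptions(16, 32, new LongPointer(new long[] {3}));
        Conv1dImpl conv = new Conv1dImpl(options);
        Tensor input = randn(1, 16, 50);     // N=1, C_in=16, L=50; randn(long...) overload assumed
        Tensor output = conv.forward(input); // expected shape 1 x 32 x 48 for kernel 3, stride 1, no padding
    }
}
```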
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplBase.java
index f41914c9da3..995da7cfd6a 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplCloneable.java
index 33989a694a0..c074380c569 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class Conv1dImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
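The `clone` overload above only changes the default-value expression from `c10::optional` to `std::optional`; Java call sites stay the same. A small hedged fragment, continuing the Conv1d sketch above and assuming `Device(String)` and `DeviceOptional(Device)` constructors exist on the generated wrappers:

```java
// Continuing the Conv1dSketch above ('conv' is the Conv1dImpl built there); Device("cpu") and
// DeviceOptional(Device) constructors are assumptions about the generated wrappers.
Module sameDevice = conv.clone();                                  // no-arg overload: device left unset
Module onCpu = conv.clone(new DeviceOptional(new Device("cpu")));  // explicit target device via the optional wrapper
```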
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dOptions.java
index c33496b60be..0c0d40a649d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
// namespace detail
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dPadding.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dPadding.java
index 4b7fec2f254..a53e34a8299 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dPadding.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv1dPadding.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dFuncOptions.java
index e584973c787..2aee8d5876f 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dFuncOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dFuncOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImpl.java
index bcbba04019d..b33153d3e40 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -21,7 +22,7 @@
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Conv2d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/** Applies convolution over a 2-D input.
- * See https://pytorch.org/docs/master/nn.html#torch.nn.Conv2d to learn about
+ * See https://pytorch.org/docs/main/nn.html#torch.nn.Conv2d to learn about
* the exact behavior of this module.
*
* See the documentation for {@code torch::nn::Conv2dOptions} class to learn what
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplBase.java
index 69e7a14c0b7..ef8fae66a6b 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplCloneable.java
index fa81cc3a2c5..fddb42dce8a 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class Conv2dImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dOptions.java
index ba32723ac78..3957baec392 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dPadding.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dPadding.java
index 61c4261f4b2..64929a64365 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dPadding.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv2dPadding.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dFuncOptions.java
index a2d7a206d87..1d5ba6e1e66 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dFuncOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dFuncOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImpl.java
index 43ec04d34ab..0990a9fd25d 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -21,7 +22,7 @@
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Conv3d ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/** Applies convolution over a 3-D input.
- * See https://pytorch.org/docs/master/nn.html#torch.nn.Conv3d to learn about
+ * See https://pytorch.org/docs/main/nn.html#torch.nn.Conv3d to learn about
* the exact behavior of this module.
*
* See the documentation for {@code torch::nn::Conv3dOptions} class to learn what
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplBase.java
index 466040b9962..e47b595e7d3 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplCloneable.java
index a67ab190ce9..81aa2a34c37 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class Conv3dImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dOptions.java
index e7dd8351238..19014e17648 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dPadding.java b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dPadding.java
index 86affc897c2..5cebe7a38fc 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dPadding.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/Conv3dPadding.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvPaddingMode.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvPaddingMode.java
index 6454ff02e98..bb99d013380 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvPaddingMode.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvPaddingMode.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dFuncOptions.java
index 1dee0a27b19..05f4d981c2c 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dFuncOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dFuncOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImpl.java
index 70c2af64001..0d180745418 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -22,7 +23,7 @@
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/** Applies the ConvTranspose1d function.
- * See https://pytorch.org/docs/master/nn.html#torch.nn.ConvTranspose1d to
+ * See https://pytorch.org/docs/main/nn.html#torch.nn.ConvTranspose1d to
* learn about the exact behavior of this module.
*
* See the documentation for {@code torch::nn::ConvTranspose1dOptions} class to learn
@@ -51,10 +52,10 @@ public ConvTranspose1dImpl(
@SharedPtr @Name("std::make_shared") private native void allocate(@ByVal ConvTranspose1dOptions options_);
public native @ByVal Tensor forward(
@Const @ByRef Tensor input,
- @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") LongArrayRefOptional output_size);
+ @Const @ByRef(nullValue = "std::optional(c10::nullopt)") LongArrayRefOptional output_size);
public native @ByVal Tensor forward(
@Const @ByRef Tensor input);
public native @ByVal Tensor forward(
@Const @ByRef Tensor input,
- @ByRef(nullValue = "c10::optional(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size);
+ @ByRef(nullValue = "std::optional(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size);
}
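The `forward` overloads above keep their shape; only the default-value expression for the optional output size moves from `c10::optional` to `std::optional`. A hedged usage sketch, under the same assumption about the options constructor as in the Conv1d sketch earlier:

```java
import org.bytedeco.javacpp.LongPointer;
import org.bytedeco.pytorch.ConvTranspose1dImpl;
import org.bytedeco.pytorch.ConvTranspose1dOptions;
import org.bytedeco.pytorch.Tensor;
import static org.bytedeco.pytorch.global.torch.*;

public class ConvTranspose1dSketch {
    public static void main(String[] args) {
        // Assumption: the options constructor mirrors torch::nn::ConvTranspose1dOptions(in_channels, out_channels, kernel_size).
        ConvTranspose1dImpl deconv =
                new ConvTranspose1dImpl(new ConvTranspose1dOptions(32, 16, new LongPointer(new long[] {3})));
        Tensor x = randn(1, 32, 50);           // N=1, C_in=32, L=50; randn(long...) overload assumed
        Tensor inferred = deconv.forward(x);   // output length inferred: (50-1)*1 + 3 = 52
        Tensor pinned = deconv.forward(x, 52); // the long... overload from the hunk pins the output size explicitly
    }
}
```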
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBase.java
index 486f8e6de8c..903bd4f22eb 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBaseBase.java
index 7fc4690f041..8ad86c01ff2 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBaseBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplBaseBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplCloneable.java
index ca6eb38739d..fab0daed646 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class ConvTranspose1dImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional(c10::nullopt)") DeviceOptional device);
public native @SharedPtr("torch::nn::Module") @ByVal Module clone();
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dOptions.java
index f8cf246f305..392da4398ec 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose1dOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
// namespace functional
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dFuncOptions.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dFuncOptions.java
index 336ac8f6073..21ac7f0b7c0 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dFuncOptions.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dFuncOptions.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImpl.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImpl.java
index f1ac7203ac1..e00009621c0 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImpl.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImpl.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -22,7 +23,7 @@
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/** Applies the ConvTranspose2d function.
- * See https://pytorch.org/docs/master/nn.html#torch.nn.ConvTranspose2d to
+ * See https://pytorch.org/docs/main/nn.html#torch.nn.ConvTranspose2d to
* learn about the exact behavior of this module.
*
* See the documentation for {@code torch::nn::ConvTranspose2dOptions} class to learn
@@ -51,10 +52,10 @@ public ConvTranspose2dImpl(
@SharedPtr @Name("std::make_shared") private native void allocate(@ByVal ConvTranspose2dOptions options_);
public native @ByVal Tensor forward(
@Const @ByRef Tensor input,
- @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") LongArrayRefOptional output_size);
+ @Const @ByRef(nullValue = "std::optional(c10::nullopt)") LongArrayRefOptional output_size);
public native @ByVal Tensor forward(
@Const @ByRef Tensor input);
public native @ByVal Tensor forward(
@Const @ByRef Tensor input,
- @ByRef(nullValue = "c10::optional(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size);
+ @ByRef(nullValue = "std::optional(c10::nullopt)") @Cast({"int64_t*", "c10::ArrayRef", "std::vector&"}) @StdVector long... output_size);
}
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBase.java
index 2b6747cda3e..b34d97a1da6 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBaseBase.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBaseBase.java
index 2c6a0855d50..3fb46498c6e 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBaseBase.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplBaseBase.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
diff --git a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplCloneable.java b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplCloneable.java
index dd57a5ddc90..47ec0dc5f2a 100644
--- a/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplCloneable.java
+++ b/pytorch/src/gen/java/org/bytedeco/pytorch/ConvTranspose2dImplCloneable.java
@@ -4,7 +4,6 @@
import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
-import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
@@ -14,6 +13,8 @@
import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;
+import org.bytedeco.javacpp.chrono.*;
+import static org.bytedeco.javacpp.global.chrono.*;
import static org.bytedeco.pytorch.global.torch.*;
@@ -32,6 +33,6 @@ public class ConvTranspose2dImplCloneable extends Module {
* and submodules in the cloned module are different from those in the
* original module. */
public native @SharedPtr("torch::nn::Module") @ByVal Module clone(
- @Const @ByRef(nullValue = "c10::optional(c10::nullopt)") DeviceOptional device);
+ @Const @ByRef(nullValue = "std::optional