diff --git a/Makefile b/Makefile
index 8df5c90..282f6f5 100644
--- a/Makefile
+++ b/Makefile
@@ -12,11 +12,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-MODULE := github.com/nvidia/go-gpuallocator
+MODULE := github.com/NVIDIA/go-gpuallocator
 
 DOCKER ?= docker
 
-GOLANG_VERSION := 1.15
+GOLANG_VERSION := 1.20
 
 ifeq ($(IMAGE),)
 REGISTRY ?= nvidia
@@ -94,11 +94,10 @@ coverage: test
 $(DOCKER_TARGETS): docker-%: .build-image
 	@echo "Running 'make $(*)' in docker container $(BUILDIMAGE)"
 	$(DOCKER) run \
-		--rm \
+		--rm --privileged \
 		-e GOCACHE=/tmp/.cache \
-		-v $(PWD):$(PWD) \
+		-v $(PWD):$(PWD):z \
 		-w $(PWD) \
-		--user $$(id -u):$$(id -g) \
 		$(BUILDIMAGE) \
 			make $(*)
 
diff --git a/docker/Dockerfile.devel b/docker/Dockerfile.devel
index d3a8308..cead53d 100644
--- a/docker/Dockerfile.devel
+++ b/docker/Dockerfile.devel
@@ -11,7 +11,8 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-ARG GOLANG_VERSION=1.15
+ARG GOLANG_VERSION=1.20
 FROM golang:${GOLANG_VERSION}
-RUN go get -u golang.org/x/lint/golint
+RUN go install golang.org/x/lint/golint@latest
+RUN go install github.com/matryer/moq@latest
diff --git a/go.mod b/go.mod
index 09cab6a..065e1a9 100644
--- a/go.mod
+++ b/go.mod
@@ -1,8 +1,11 @@
 module github.com/NVIDIA/go-gpuallocator
 
-go 1.15
+go 1.20
 
-require github.com/NVIDIA/gpu-monitoring-tools v0.0.0-20201109160820-d08ea3cdcce4
+require (
+	github.com/NVIDIA/go-nvml v0.12.0-1
+	gitlab.com/nvidia/cloud-native/go-nvlib v0.0.0-20230711114409-e6fcecfa6790
+)
 
 replace (
 	k8s.io/api => k8s.io/api v0.18.2
diff --git a/go.sum b/go.sum
index 9ddf4bb..21e5ae0 100644
--- a/go.sum
+++ b/go.sum
@@ -1,702 +1,8 @@
-bitbucket.org/bertimus9/systemstat v0.0.0-20180207000608-0eeff89b0690/go.mod h1:Ulb78X89vxKYgdL24HMTiXYHlyHEvruOj1ZPlqeNEZM=
[... remaining auto-generated go.sum checksum entries for the removed gpu-monitoring-tools dependency tree elided; the hunk is truncated in this excerpt ...]
h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.0/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= -gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/mcuadros/go-syslog.v2 v2.2.1/go.mod h1:l5LPIyOOyIdQquNg+oU6Z3524YwrcqEm0aKH+5zpt2U= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gotest.tools v2.1.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/gotestsum v0.3.5/go.mod h1:Mnf3e5FUzXbkCfynWBGOwLssY7gTQgCHObK9tMpAriY= -grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= -honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= -k8s.io/api v0.18.2/go.mod h1:SJCWI7OLzhZSvbY7U8zwNl9UA4o1fizoug34OV/2r78= -k8s.io/apiextensions-apiserver v0.18.2/go.mod h1:q3faSnRGmYimiocj6cHQ1I3WpLqmDgJFlKL37fC4ZvY= -k8s.io/apimachinery v0.18.2/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA= -k8s.io/apiserver v0.18.2/go.mod h1:Xbh066NqrZO8cbsoenCwyDJ1OSi8Ag8I2lezeHxzwzw= -k8s.io/cli-runtime v0.18.2/go.mod h1:yfFR2sQQzDsV0VEKGZtrJwEy4hLZ2oj4ZIfodgxAHWQ= -k8s.io/client-go v0.18.2/go.mod h1:Xcm5wVGXX9HAA2JJ2sSBUn3tCJ+4SVlCbl2MNNv+CIU= -k8s.io/cloud-provider v0.18.2/go.mod h1:t1HjnQN2l5wK/fORo/yyu0Q+bZTYuReHYCIpi/qqfms= -k8s.io/cluster-bootstrap v0.18.2/go.mod h1:lHDOrHDzZi3eQE9bYMFpkwwUuLYiAiBuQuHaAnoGWTk= -k8s.io/code-generator v0.18.2/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc= -k8s.io/component-base v0.18.2/go.mod h1:kqLlMuhJNHQ9lz8Z7V5bxUUtjFZnrypArGl58gmDfUM= -k8s.io/cri-api v0.18.2/go.mod h1:OJtpjDvfsKoLGhvcc0qfygved0S0dGX56IJzPbqTG1s= -k8s.io/csi-translation-lib v0.18.2/go.mod h1:2lyXP0OP6MuzAEde802d4L/Rhzj4teNdNBKGVxVKV78= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod 
h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/heapster v1.2.0-beta.1/go.mod h1:h1uhptVXMwC8xtZBYsPXKVi8fpdlYkTs6k949KozGrM= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-aggregator v0.18.2/go.mod h1:ijq6FnNUoKinA6kKbkN6svdTacSoQVNtKqmQ1+XJEYQ= -k8s.io/kube-controller-manager v0.18.2/go.mod h1:v45wCqexTrOltgwj92V4ve7hm5f70GQzi4a47/RQ0HQ= -k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/kube-proxy v0.18.2/go.mod h1:VTgyDMdylYGgHVqLQo/Nt4yDWkh/LRsSnxRiG8GVgDo= -k8s.io/kube-scheduler v0.18.2/go.mod h1:dL+C0Hp/ahQOQK3BsgmV8btb3BtMZvz6ONUw/v1N8sk= -k8s.io/kubectl v0.18.2/go.mod h1:OdgFa3AlsPKRpFFYE7ICTwulXOcMGXHTc+UKhHKvrb4= -k8s.io/kubelet v0.18.2/go.mod h1:7x/nzlIWJLg7vOfmbQ4lgsYazEB0gOhjiYiHK1Gii4M= -k8s.io/kubernetes v1.18.2/go.mod h1:z8xjOOO1Ljz+TaHpOxVGC7cxtF32TesIamoQ+BZrVS0= -k8s.io/legacy-cloud-providers v0.18.2/go.mod h1:zzFRqgDC6cP1SgPl7lMmo1fjILDZ+bsNtTjLnxAfgI0= -k8s.io/metrics v0.18.2/go.mod h1:qga8E7QfYNR9Q89cSCAjinC9pTZ7yv1XSVGUB0vJypg= -k8s.io/repo-infra v0.0.1-alpha.1/go.mod h1:wO1t9WaB99V80ljbeENTnayuEEwNZt7gECYh/CEyOJ8= -k8s.io/sample-apiserver v0.18.2/go.mod h1:qYk6alcVIlWzmypsSmsWw5Kj4eUNr5jzJZZFJDUXwXE= -k8s.io/system-validators v1.0.4/go.mod h1:HgSgTg4NAGNoYYjKsUyk52gdNi2PVDswQ9Iyn66R7NI= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= -modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= -modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= -modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= -modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= -modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= -mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc= -mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= -mvdan.cc/unparam v0.0.0-20190209190245-fbb59629db34/go.mod h1:H6SUd1XjIs+qQCyskXg5OFSrilMRUkD8ePJpHKDPaeY= -rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0= -sigs.k8s.io/kustomize v2.0.3+incompatible/go.mod h1:MkjgH3RdOWrievjo6c9T245dYlB5QeXV4WCbnt/PEpU= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= -vbom.ml/util v0.0.0-20160121211510-db5cfe13f5cc/go.mod h1:so/NYdZXCz+E3ZpW0uAoCj6uzU2+8OWDFv/HxUSs7kI= +github.com/NVIDIA/go-nvml v0.12.0-1 h1:6mdjtlFo+17dWL7VFPfuRMtf0061TF4DKls9pkSw6uM= +github.com/NVIDIA/go-nvml v0.12.0-1/go.mod h1:hy7HYeQy335x6nEss0Ne3PYqleRa6Ct+VKD9RQ4nyFs= 
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +gitlab.com/nvidia/cloud-native/go-nvlib v0.0.0-20230711114409-e6fcecfa6790 h1:seMiNJ+XnvRr3QZ4Q4mv4AfiGd94rYKUVJyPqU/qGRM= +gitlab.com/nvidia/cloud-native/go-nvlib v0.0.0-20230711114409-e6fcecfa6790/go.mod h1:KYZksBgh18o+uzgnpDazzG4LVYtnfB96VXHMXypEtik= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= diff --git a/gpuallocator/allocator.go b/gpuallocator/allocator.go index b56bccf..2e2aa61 100644 --- a/gpuallocator/allocator.go +++ b/gpuallocator/allocator.go @@ -1,4 +1,18 @@ -// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ package gpuallocator @@ -6,7 +20,7 @@ import ( "fmt" "runtime" - "github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml" + "github.com/NVIDIA/go-gpuallocator/internal/gpulib" ) // Allocator defines the primary object for allocating and freeing the @@ -45,9 +59,9 @@ func NewBestEffortAllocator() (*Allocator, error) { // NewAllocator creates a new Allocator using the given allocation policy func NewAllocator(policy Policy) (*Allocator, error) { - err := nvml.Init() - if err != nil { - return nil, fmt.Errorf("error initializing NVML: %v", err) + ret := gpulib.Init() + if ret.Value() != gpulib.SUCCESS { + return nil, fmt.Errorf("error initializing NVML: %v", ret.Error()) } devices, err := NewDevices() @@ -58,8 +72,8 @@ func NewAllocator(policy Policy) (*Allocator, error) { allocator := newAllocatorFrom(devices, policy) runtime.SetFinalizer(allocator, func(allocator *Allocator) { - // Explicitly ignore any errors from nvml.Shutdown(). - _ = nvml.Shutdown() + // Explicitly ignore any errors from gpulib.Shutdown(). + _ = gpulib.Shutdown() }) return allocator, nil diff --git a/gpuallocator/besteffort_policy.go b/gpuallocator/besteffort_policy.go index acb697e..02c876e 100644 --- a/gpuallocator/besteffort_policy.go +++ b/gpuallocator/besteffort_policy.go @@ -1,11 +1,25 @@ -// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. +/** +# Copyright (c) NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+**/ package gpuallocator import ( "fmt" - "github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml" + "github.com/NVIDIA/go-gpuallocator/internal/gpulib" ) type bestEffortPolicy struct{} @@ -313,41 +327,41 @@ func calculateGPUPairScore(gpu0 *Device, gpu1 *Device) int { for _, link := range gpu0.Links[gpu1.Index] { switch link.Type { - case nvml.P2PLinkCrossCPU: + case gpulib.P2PLinkCrossCPU: score += 10 - case nvml.P2PLinkSameCPU: + case gpulib.P2PLinkSameCPU: score += 20 - case nvml.P2PLinkHostBridge: + case gpulib.P2PLinkHostBridge: score += 30 - case nvml.P2PLinkMultiSwitch: + case gpulib.P2PLinkMultiSwitch: score += 40 - case nvml.P2PLinkSingleSwitch: + case gpulib.P2PLinkSingleSwitch: score += 50 - case nvml.P2PLinkSameBoard: + case gpulib.P2PLinkSameBoard: score += 60 - case nvml.SingleNVLINKLink: + case gpulib.SingleNVLINKLink: score += 100 - case nvml.TwoNVLINKLinks: + case gpulib.TwoNVLINKLinks: score += 200 - case nvml.ThreeNVLINKLinks: + case gpulib.ThreeNVLINKLinks: score += 300 - case nvml.FourNVLINKLinks: + case gpulib.FourNVLINKLinks: score += 400 - case nvml.FiveNVLINKLinks: + case gpulib.FiveNVLINKLinks: score += 500 - case nvml.SixNVLINKLinks: + case gpulib.SixNVLINKLinks: score += 600 - case nvml.SevenNVLINKLinks: + case gpulib.SevenNVLINKLinks: score += 700 - case nvml.EightNVLINKLinks: + case gpulib.EightNVLINKLinks: score += 800 - case nvml.NineNVLINKLinks: + case gpulib.NineNVLINKLinks: score += 900 - case nvml.TenNVLINKLinks: + case gpulib.TenNVLINKLinks: score += 1000 - case nvml.ElevenNVLINKLinks: + case gpulib.ElevenNVLINKLinks: score += 1100 - case nvml.TwelveNVLINKLinks: + case gpulib.TwelveNVLINKLinks: score += 1200 } } diff --git a/gpuallocator/common_test.go b/gpuallocator/common_test.go index c03c549..bb720a7 100644 --- a/gpuallocator/common_test.go +++ b/gpuallocator/common_test.go @@ -7,7 +7,7 @@ import ( "sort" "testing" - "github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml" + "github.com/NVIDIA/go-gpuallocator/internal/gpulib" ) const pad = ^int(0) @@ -109,24 +109,27 @@ func RunPolicyAllocTests(t *testing.T, policy Policy, tests []PolicyAllocTest) { } func NewTestGPU(index int) *TestGPU { - return &TestGPU{ - Index: index, - Device: &nvml.Device{ - UUID: fmt.Sprintf("GPU-%d", index), - PCI: nvml.PCIInfo{ - BusID: fmt.Sprintf("GPU-%d", index), - }, + id := fmt.Sprintf("GPU-%d", index) + + mockedSuccess := new(gpulib.Return) + mockedDevice := &gpulib.DeviceLiteMock{ + GetUUIDFunc: func() (string, gpulib.Return) { + return id, *mockedSuccess }, - Links: make(map[int][]P2PLink), + } + return &TestGPU{ + Index: index, + DeviceLite: mockedDevice, + Links: make(map[int][]P2PLink), } } -func (from *TestGPU) AddLink(to *TestGPU, linkType nvml.P2PLinkType) { +func (from *TestGPU) AddLink(to *TestGPU, linkType gpulib.P2PLinkType) { link := P2PLink{(*Device)(to), linkType} from.Links[to.Index] = append(from.Links[to.Index], link) } -func (n TestNode) AddLink(from, to int, linkType nvml.P2PLinkType) { +func (n TestNode) AddLink(from, to int, linkType gpulib.P2PLinkType) { n[from].AddLink(n[to], linkType) } @@ -147,20 +150,20 @@ func New4xRTX8000Node() TestNode { } // NVLinks - node.AddLink(0, 3, nvml.TwoNVLINKLinks) - node.AddLink(1, 2, nvml.TwoNVLINKLinks) - node.AddLink(2, 1, nvml.TwoNVLINKLinks) - node.AddLink(3, 0, nvml.TwoNVLINKLinks) + node.AddLink(0, 3, gpulib.TwoNVLINKLinks) + node.AddLink(1, 2, gpulib.TwoNVLINKLinks) + node.AddLink(2, 1, gpulib.TwoNVLINKLinks) + node.AddLink(3, 0, gpulib.TwoNVLINKLinks) // P2PLinks - node.AddLink(0, 1, 
nvml.P2PLinkSameCPU) - node.AddLink(0, 2, nvml.P2PLinkCrossCPU) - node.AddLink(1, 0, nvml.P2PLinkSameCPU) - node.AddLink(1, 3, nvml.P2PLinkCrossCPU) - node.AddLink(2, 0, nvml.P2PLinkCrossCPU) - node.AddLink(2, 3, nvml.P2PLinkSameCPU) - node.AddLink(3, 1, nvml.P2PLinkCrossCPU) - node.AddLink(3, 2, nvml.P2PLinkSameCPU) + node.AddLink(0, 1, gpulib.P2PLinkSameCPU) + node.AddLink(0, 2, gpulib.P2PLinkCrossCPU) + node.AddLink(1, 0, gpulib.P2PLinkSameCPU) + node.AddLink(1, 3, gpulib.P2PLinkCrossCPU) + node.AddLink(2, 0, gpulib.P2PLinkCrossCPU) + node.AddLink(2, 3, gpulib.P2PLinkSameCPU) + node.AddLink(3, 1, gpulib.P2PLinkCrossCPU) + node.AddLink(3, 2, gpulib.P2PLinkSameCPU) return node } @@ -178,110 +181,110 @@ func NewDGX1PascalNode() TestNode { } // NVLinks - node.AddLink(0, 1, nvml.SingleNVLINKLink) - node.AddLink(0, 2, nvml.SingleNVLINKLink) - node.AddLink(0, 3, nvml.SingleNVLINKLink) - node.AddLink(0, 4, nvml.SingleNVLINKLink) - - node.AddLink(1, 0, nvml.SingleNVLINKLink) - node.AddLink(1, 2, nvml.SingleNVLINKLink) - node.AddLink(1, 3, nvml.SingleNVLINKLink) - node.AddLink(1, 5, nvml.SingleNVLINKLink) - - node.AddLink(2, 0, nvml.SingleNVLINKLink) - node.AddLink(2, 1, nvml.SingleNVLINKLink) - node.AddLink(2, 3, nvml.SingleNVLINKLink) - node.AddLink(2, 6, nvml.SingleNVLINKLink) - - node.AddLink(3, 0, nvml.SingleNVLINKLink) - node.AddLink(3, 1, nvml.SingleNVLINKLink) - node.AddLink(3, 2, nvml.SingleNVLINKLink) - node.AddLink(3, 7, nvml.SingleNVLINKLink) - - node.AddLink(4, 0, nvml.SingleNVLINKLink) - node.AddLink(4, 5, nvml.SingleNVLINKLink) - node.AddLink(4, 6, nvml.SingleNVLINKLink) - node.AddLink(4, 7, nvml.SingleNVLINKLink) - - node.AddLink(5, 1, nvml.SingleNVLINKLink) - node.AddLink(5, 4, nvml.SingleNVLINKLink) - node.AddLink(5, 6, nvml.SingleNVLINKLink) - node.AddLink(5, 7, nvml.SingleNVLINKLink) - - node.AddLink(6, 2, nvml.SingleNVLINKLink) - node.AddLink(6, 4, nvml.SingleNVLINKLink) - node.AddLink(6, 5, nvml.SingleNVLINKLink) - node.AddLink(6, 7, nvml.SingleNVLINKLink) - - node.AddLink(7, 3, nvml.SingleNVLINKLink) - node.AddLink(7, 4, nvml.SingleNVLINKLink) - node.AddLink(7, 5, nvml.SingleNVLINKLink) - node.AddLink(7, 6, nvml.SingleNVLINKLink) + node.AddLink(0, 1, gpulib.SingleNVLINKLink) + node.AddLink(0, 2, gpulib.SingleNVLINKLink) + node.AddLink(0, 3, gpulib.SingleNVLINKLink) + node.AddLink(0, 4, gpulib.SingleNVLINKLink) + + node.AddLink(1, 0, gpulib.SingleNVLINKLink) + node.AddLink(1, 2, gpulib.SingleNVLINKLink) + node.AddLink(1, 3, gpulib.SingleNVLINKLink) + node.AddLink(1, 5, gpulib.SingleNVLINKLink) + + node.AddLink(2, 0, gpulib.SingleNVLINKLink) + node.AddLink(2, 1, gpulib.SingleNVLINKLink) + node.AddLink(2, 3, gpulib.SingleNVLINKLink) + node.AddLink(2, 6, gpulib.SingleNVLINKLink) + + node.AddLink(3, 0, gpulib.SingleNVLINKLink) + node.AddLink(3, 1, gpulib.SingleNVLINKLink) + node.AddLink(3, 2, gpulib.SingleNVLINKLink) + node.AddLink(3, 7, gpulib.SingleNVLINKLink) + + node.AddLink(4, 0, gpulib.SingleNVLINKLink) + node.AddLink(4, 5, gpulib.SingleNVLINKLink) + node.AddLink(4, 6, gpulib.SingleNVLINKLink) + node.AddLink(4, 7, gpulib.SingleNVLINKLink) + + node.AddLink(5, 1, gpulib.SingleNVLINKLink) + node.AddLink(5, 4, gpulib.SingleNVLINKLink) + node.AddLink(5, 6, gpulib.SingleNVLINKLink) + node.AddLink(5, 7, gpulib.SingleNVLINKLink) + + node.AddLink(6, 2, gpulib.SingleNVLINKLink) + node.AddLink(6, 4, gpulib.SingleNVLINKLink) + node.AddLink(6, 5, gpulib.SingleNVLINKLink) + node.AddLink(6, 7, gpulib.SingleNVLINKLink) + + node.AddLink(7, 3, gpulib.SingleNVLINKLink) + node.AddLink(7, 4, 
gpulib.SingleNVLINKLink) + node.AddLink(7, 5, gpulib.SingleNVLINKLink) + node.AddLink(7, 6, gpulib.SingleNVLINKLink) // P2PLinks - node.AddLink(0, 1, nvml.P2PLinkHostBridge) - node.AddLink(0, 2, nvml.P2PLinkSingleSwitch) - node.AddLink(0, 3, nvml.P2PLinkHostBridge) - node.AddLink(0, 4, nvml.P2PLinkCrossCPU) - node.AddLink(0, 5, nvml.P2PLinkCrossCPU) - node.AddLink(0, 6, nvml.P2PLinkCrossCPU) - node.AddLink(0, 7, nvml.P2PLinkCrossCPU) - - node.AddLink(1, 0, nvml.P2PLinkHostBridge) - node.AddLink(1, 2, nvml.P2PLinkHostBridge) - node.AddLink(1, 3, nvml.P2PLinkSingleSwitch) - node.AddLink(1, 4, nvml.P2PLinkCrossCPU) - node.AddLink(1, 5, nvml.P2PLinkCrossCPU) - node.AddLink(1, 6, nvml.P2PLinkCrossCPU) - node.AddLink(1, 7, nvml.P2PLinkCrossCPU) - - node.AddLink(2, 0, nvml.P2PLinkSingleSwitch) - node.AddLink(2, 1, nvml.P2PLinkHostBridge) - node.AddLink(2, 3, nvml.P2PLinkHostBridge) - node.AddLink(2, 4, nvml.P2PLinkCrossCPU) - node.AddLink(2, 5, nvml.P2PLinkCrossCPU) - node.AddLink(2, 6, nvml.P2PLinkCrossCPU) - node.AddLink(2, 7, nvml.P2PLinkCrossCPU) - - node.AddLink(3, 0, nvml.P2PLinkHostBridge) - node.AddLink(3, 1, nvml.P2PLinkSingleSwitch) - node.AddLink(3, 2, nvml.P2PLinkHostBridge) - node.AddLink(3, 4, nvml.P2PLinkCrossCPU) - node.AddLink(3, 5, nvml.P2PLinkCrossCPU) - node.AddLink(3, 6, nvml.P2PLinkCrossCPU) - node.AddLink(3, 7, nvml.P2PLinkCrossCPU) - - node.AddLink(4, 0, nvml.P2PLinkCrossCPU) - node.AddLink(4, 1, nvml.P2PLinkCrossCPU) - node.AddLink(4, 2, nvml.P2PLinkCrossCPU) - node.AddLink(4, 3, nvml.P2PLinkCrossCPU) - node.AddLink(4, 5, nvml.P2PLinkHostBridge) - node.AddLink(4, 6, nvml.P2PLinkSingleSwitch) - node.AddLink(4, 7, nvml.P2PLinkHostBridge) - - node.AddLink(5, 0, nvml.P2PLinkCrossCPU) - node.AddLink(5, 1, nvml.P2PLinkCrossCPU) - node.AddLink(5, 2, nvml.P2PLinkCrossCPU) - node.AddLink(5, 3, nvml.P2PLinkCrossCPU) - node.AddLink(5, 4, nvml.P2PLinkHostBridge) - node.AddLink(5, 6, nvml.P2PLinkHostBridge) - node.AddLink(5, 7, nvml.P2PLinkSingleSwitch) - - node.AddLink(6, 0, nvml.P2PLinkCrossCPU) - node.AddLink(6, 1, nvml.P2PLinkCrossCPU) - node.AddLink(6, 2, nvml.P2PLinkCrossCPU) - node.AddLink(6, 3, nvml.P2PLinkCrossCPU) - node.AddLink(6, 4, nvml.P2PLinkSingleSwitch) - node.AddLink(6, 5, nvml.P2PLinkHostBridge) - node.AddLink(6, 7, nvml.P2PLinkHostBridge) - - node.AddLink(7, 0, nvml.P2PLinkCrossCPU) - node.AddLink(7, 1, nvml.P2PLinkCrossCPU) - node.AddLink(7, 2, nvml.P2PLinkCrossCPU) - node.AddLink(7, 3, nvml.P2PLinkCrossCPU) - node.AddLink(7, 4, nvml.P2PLinkHostBridge) - node.AddLink(7, 5, nvml.P2PLinkSingleSwitch) - node.AddLink(7, 6, nvml.P2PLinkHostBridge) + node.AddLink(0, 1, gpulib.P2PLinkHostBridge) + node.AddLink(0, 2, gpulib.P2PLinkSingleSwitch) + node.AddLink(0, 3, gpulib.P2PLinkHostBridge) + node.AddLink(0, 4, gpulib.P2PLinkCrossCPU) + node.AddLink(0, 5, gpulib.P2PLinkCrossCPU) + node.AddLink(0, 6, gpulib.P2PLinkCrossCPU) + node.AddLink(0, 7, gpulib.P2PLinkCrossCPU) + + node.AddLink(1, 0, gpulib.P2PLinkHostBridge) + node.AddLink(1, 2, gpulib.P2PLinkHostBridge) + node.AddLink(1, 3, gpulib.P2PLinkSingleSwitch) + node.AddLink(1, 4, gpulib.P2PLinkCrossCPU) + node.AddLink(1, 5, gpulib.P2PLinkCrossCPU) + node.AddLink(1, 6, gpulib.P2PLinkCrossCPU) + node.AddLink(1, 7, gpulib.P2PLinkCrossCPU) + + node.AddLink(2, 0, gpulib.P2PLinkSingleSwitch) + node.AddLink(2, 1, gpulib.P2PLinkHostBridge) + node.AddLink(2, 3, gpulib.P2PLinkHostBridge) + node.AddLink(2, 4, gpulib.P2PLinkCrossCPU) + node.AddLink(2, 5, gpulib.P2PLinkCrossCPU) + node.AddLink(2, 6, gpulib.P2PLinkCrossCPU) + 
node.AddLink(2, 7, gpulib.P2PLinkCrossCPU) + + node.AddLink(3, 0, gpulib.P2PLinkHostBridge) + node.AddLink(3, 1, gpulib.P2PLinkSingleSwitch) + node.AddLink(3, 2, gpulib.P2PLinkHostBridge) + node.AddLink(3, 4, gpulib.P2PLinkCrossCPU) + node.AddLink(3, 5, gpulib.P2PLinkCrossCPU) + node.AddLink(3, 6, gpulib.P2PLinkCrossCPU) + node.AddLink(3, 7, gpulib.P2PLinkCrossCPU) + + node.AddLink(4, 0, gpulib.P2PLinkCrossCPU) + node.AddLink(4, 1, gpulib.P2PLinkCrossCPU) + node.AddLink(4, 2, gpulib.P2PLinkCrossCPU) + node.AddLink(4, 3, gpulib.P2PLinkCrossCPU) + node.AddLink(4, 5, gpulib.P2PLinkHostBridge) + node.AddLink(4, 6, gpulib.P2PLinkSingleSwitch) + node.AddLink(4, 7, gpulib.P2PLinkHostBridge) + + node.AddLink(5, 0, gpulib.P2PLinkCrossCPU) + node.AddLink(5, 1, gpulib.P2PLinkCrossCPU) + node.AddLink(5, 2, gpulib.P2PLinkCrossCPU) + node.AddLink(5, 3, gpulib.P2PLinkCrossCPU) + node.AddLink(5, 4, gpulib.P2PLinkHostBridge) + node.AddLink(5, 6, gpulib.P2PLinkHostBridge) + node.AddLink(5, 7, gpulib.P2PLinkSingleSwitch) + + node.AddLink(6, 0, gpulib.P2PLinkCrossCPU) + node.AddLink(6, 1, gpulib.P2PLinkCrossCPU) + node.AddLink(6, 2, gpulib.P2PLinkCrossCPU) + node.AddLink(6, 3, gpulib.P2PLinkCrossCPU) + node.AddLink(6, 4, gpulib.P2PLinkSingleSwitch) + node.AddLink(6, 5, gpulib.P2PLinkHostBridge) + node.AddLink(6, 7, gpulib.P2PLinkHostBridge) + + node.AddLink(7, 0, gpulib.P2PLinkCrossCPU) + node.AddLink(7, 1, gpulib.P2PLinkCrossCPU) + node.AddLink(7, 2, gpulib.P2PLinkCrossCPU) + node.AddLink(7, 3, gpulib.P2PLinkCrossCPU) + node.AddLink(7, 4, gpulib.P2PLinkHostBridge) + node.AddLink(7, 5, gpulib.P2PLinkSingleSwitch) + node.AddLink(7, 6, gpulib.P2PLinkHostBridge) return node } @@ -299,110 +302,110 @@ func NewDGX1VoltaNode() TestNode { } // NVLinks - node.AddLink(0, 1, nvml.SingleNVLINKLink) - node.AddLink(0, 2, nvml.SingleNVLINKLink) - node.AddLink(0, 3, nvml.TwoNVLINKLinks) - node.AddLink(0, 4, nvml.TwoNVLINKLinks) - - node.AddLink(1, 0, nvml.SingleNVLINKLink) - node.AddLink(1, 2, nvml.TwoNVLINKLinks) - node.AddLink(1, 3, nvml.SingleNVLINKLink) - node.AddLink(1, 5, nvml.TwoNVLINKLinks) - - node.AddLink(2, 0, nvml.SingleNVLINKLink) - node.AddLink(2, 1, nvml.TwoNVLINKLinks) - node.AddLink(2, 3, nvml.TwoNVLINKLinks) - node.AddLink(2, 6, nvml.SingleNVLINKLink) - - node.AddLink(3, 0, nvml.TwoNVLINKLinks) - node.AddLink(3, 1, nvml.SingleNVLINKLink) - node.AddLink(3, 2, nvml.TwoNVLINKLinks) - node.AddLink(3, 7, nvml.SingleNVLINKLink) - - node.AddLink(4, 0, nvml.TwoNVLINKLinks) - node.AddLink(4, 5, nvml.SingleNVLINKLink) - node.AddLink(4, 6, nvml.SingleNVLINKLink) - node.AddLink(4, 7, nvml.TwoNVLINKLinks) - - node.AddLink(5, 1, nvml.TwoNVLINKLinks) - node.AddLink(5, 4, nvml.SingleNVLINKLink) - node.AddLink(5, 6, nvml.TwoNVLINKLinks) - node.AddLink(5, 7, nvml.SingleNVLINKLink) - - node.AddLink(6, 2, nvml.SingleNVLINKLink) - node.AddLink(6, 4, nvml.SingleNVLINKLink) - node.AddLink(6, 5, nvml.TwoNVLINKLinks) - node.AddLink(6, 7, nvml.TwoNVLINKLinks) - - node.AddLink(7, 3, nvml.SingleNVLINKLink) - node.AddLink(7, 4, nvml.TwoNVLINKLinks) - node.AddLink(7, 5, nvml.SingleNVLINKLink) - node.AddLink(7, 6, nvml.TwoNVLINKLinks) + node.AddLink(0, 1, gpulib.SingleNVLINKLink) + node.AddLink(0, 2, gpulib.SingleNVLINKLink) + node.AddLink(0, 3, gpulib.TwoNVLINKLinks) + node.AddLink(0, 4, gpulib.TwoNVLINKLinks) + + node.AddLink(1, 0, gpulib.SingleNVLINKLink) + node.AddLink(1, 2, gpulib.TwoNVLINKLinks) + node.AddLink(1, 3, gpulib.SingleNVLINKLink) + node.AddLink(1, 5, gpulib.TwoNVLINKLinks) + + node.AddLink(2, 0, gpulib.SingleNVLINKLink) 
+ node.AddLink(2, 1, gpulib.TwoNVLINKLinks) + node.AddLink(2, 3, gpulib.TwoNVLINKLinks) + node.AddLink(2, 6, gpulib.SingleNVLINKLink) + + node.AddLink(3, 0, gpulib.TwoNVLINKLinks) + node.AddLink(3, 1, gpulib.SingleNVLINKLink) + node.AddLink(3, 2, gpulib.TwoNVLINKLinks) + node.AddLink(3, 7, gpulib.SingleNVLINKLink) + + node.AddLink(4, 0, gpulib.TwoNVLINKLinks) + node.AddLink(4, 5, gpulib.SingleNVLINKLink) + node.AddLink(4, 6, gpulib.SingleNVLINKLink) + node.AddLink(4, 7, gpulib.TwoNVLINKLinks) + + node.AddLink(5, 1, gpulib.TwoNVLINKLinks) + node.AddLink(5, 4, gpulib.SingleNVLINKLink) + node.AddLink(5, 6, gpulib.TwoNVLINKLinks) + node.AddLink(5, 7, gpulib.SingleNVLINKLink) + + node.AddLink(6, 2, gpulib.SingleNVLINKLink) + node.AddLink(6, 4, gpulib.SingleNVLINKLink) + node.AddLink(6, 5, gpulib.TwoNVLINKLinks) + node.AddLink(6, 7, gpulib.TwoNVLINKLinks) + + node.AddLink(7, 3, gpulib.SingleNVLINKLink) + node.AddLink(7, 4, gpulib.TwoNVLINKLinks) + node.AddLink(7, 5, gpulib.SingleNVLINKLink) + node.AddLink(7, 6, gpulib.TwoNVLINKLinks) // P2PLinks - node.AddLink(0, 1, nvml.P2PLinkSingleSwitch) - node.AddLink(0, 2, nvml.P2PLinkHostBridge) - node.AddLink(0, 3, nvml.P2PLinkHostBridge) - node.AddLink(0, 4, nvml.P2PLinkCrossCPU) - node.AddLink(0, 5, nvml.P2PLinkCrossCPU) - node.AddLink(0, 6, nvml.P2PLinkCrossCPU) - node.AddLink(0, 7, nvml.P2PLinkCrossCPU) - - node.AddLink(1, 0, nvml.P2PLinkSingleSwitch) - node.AddLink(1, 2, nvml.P2PLinkHostBridge) - node.AddLink(1, 3, nvml.P2PLinkHostBridge) - node.AddLink(1, 4, nvml.P2PLinkCrossCPU) - node.AddLink(1, 5, nvml.P2PLinkCrossCPU) - node.AddLink(1, 6, nvml.P2PLinkCrossCPU) - node.AddLink(1, 7, nvml.P2PLinkCrossCPU) - - node.AddLink(2, 0, nvml.P2PLinkHostBridge) - node.AddLink(2, 1, nvml.P2PLinkHostBridge) - node.AddLink(2, 3, nvml.P2PLinkSingleSwitch) - node.AddLink(2, 4, nvml.P2PLinkCrossCPU) - node.AddLink(2, 5, nvml.P2PLinkCrossCPU) - node.AddLink(2, 6, nvml.P2PLinkCrossCPU) - node.AddLink(2, 7, nvml.P2PLinkCrossCPU) - - node.AddLink(3, 0, nvml.P2PLinkHostBridge) - node.AddLink(3, 1, nvml.P2PLinkHostBridge) - node.AddLink(3, 2, nvml.P2PLinkSingleSwitch) - node.AddLink(3, 4, nvml.P2PLinkCrossCPU) - node.AddLink(3, 5, nvml.P2PLinkCrossCPU) - node.AddLink(3, 6, nvml.P2PLinkCrossCPU) - node.AddLink(3, 7, nvml.P2PLinkCrossCPU) - - node.AddLink(4, 0, nvml.P2PLinkCrossCPU) - node.AddLink(4, 1, nvml.P2PLinkCrossCPU) - node.AddLink(4, 2, nvml.P2PLinkCrossCPU) - node.AddLink(4, 3, nvml.P2PLinkCrossCPU) - node.AddLink(4, 5, nvml.P2PLinkSingleSwitch) - node.AddLink(4, 6, nvml.P2PLinkHostBridge) - node.AddLink(4, 7, nvml.P2PLinkHostBridge) - - node.AddLink(5, 0, nvml.P2PLinkCrossCPU) - node.AddLink(5, 1, nvml.P2PLinkCrossCPU) - node.AddLink(5, 2, nvml.P2PLinkCrossCPU) - node.AddLink(5, 3, nvml.P2PLinkCrossCPU) - node.AddLink(5, 4, nvml.P2PLinkSingleSwitch) - node.AddLink(5, 6, nvml.P2PLinkHostBridge) - node.AddLink(5, 7, nvml.P2PLinkHostBridge) - - node.AddLink(6, 0, nvml.P2PLinkCrossCPU) - node.AddLink(6, 1, nvml.P2PLinkCrossCPU) - node.AddLink(6, 2, nvml.P2PLinkCrossCPU) - node.AddLink(6, 3, nvml.P2PLinkCrossCPU) - node.AddLink(6, 4, nvml.P2PLinkHostBridge) - node.AddLink(6, 5, nvml.P2PLinkHostBridge) - node.AddLink(6, 7, nvml.P2PLinkSingleSwitch) - - node.AddLink(7, 0, nvml.P2PLinkCrossCPU) - node.AddLink(7, 1, nvml.P2PLinkCrossCPU) - node.AddLink(7, 2, nvml.P2PLinkCrossCPU) - node.AddLink(7, 3, nvml.P2PLinkCrossCPU) - node.AddLink(7, 4, nvml.P2PLinkHostBridge) - node.AddLink(7, 5, nvml.P2PLinkHostBridge) - node.AddLink(7, 6, nvml.P2PLinkSingleSwitch) + 
node.AddLink(0, 1, gpulib.P2PLinkSingleSwitch) + node.AddLink(0, 2, gpulib.P2PLinkHostBridge) + node.AddLink(0, 3, gpulib.P2PLinkHostBridge) + node.AddLink(0, 4, gpulib.P2PLinkCrossCPU) + node.AddLink(0, 5, gpulib.P2PLinkCrossCPU) + node.AddLink(0, 6, gpulib.P2PLinkCrossCPU) + node.AddLink(0, 7, gpulib.P2PLinkCrossCPU) + + node.AddLink(1, 0, gpulib.P2PLinkSingleSwitch) + node.AddLink(1, 2, gpulib.P2PLinkHostBridge) + node.AddLink(1, 3, gpulib.P2PLinkHostBridge) + node.AddLink(1, 4, gpulib.P2PLinkCrossCPU) + node.AddLink(1, 5, gpulib.P2PLinkCrossCPU) + node.AddLink(1, 6, gpulib.P2PLinkCrossCPU) + node.AddLink(1, 7, gpulib.P2PLinkCrossCPU) + + node.AddLink(2, 0, gpulib.P2PLinkHostBridge) + node.AddLink(2, 1, gpulib.P2PLinkHostBridge) + node.AddLink(2, 3, gpulib.P2PLinkSingleSwitch) + node.AddLink(2, 4, gpulib.P2PLinkCrossCPU) + node.AddLink(2, 5, gpulib.P2PLinkCrossCPU) + node.AddLink(2, 6, gpulib.P2PLinkCrossCPU) + node.AddLink(2, 7, gpulib.P2PLinkCrossCPU) + + node.AddLink(3, 0, gpulib.P2PLinkHostBridge) + node.AddLink(3, 1, gpulib.P2PLinkHostBridge) + node.AddLink(3, 2, gpulib.P2PLinkSingleSwitch) + node.AddLink(3, 4, gpulib.P2PLinkCrossCPU) + node.AddLink(3, 5, gpulib.P2PLinkCrossCPU) + node.AddLink(3, 6, gpulib.P2PLinkCrossCPU) + node.AddLink(3, 7, gpulib.P2PLinkCrossCPU) + + node.AddLink(4, 0, gpulib.P2PLinkCrossCPU) + node.AddLink(4, 1, gpulib.P2PLinkCrossCPU) + node.AddLink(4, 2, gpulib.P2PLinkCrossCPU) + node.AddLink(4, 3, gpulib.P2PLinkCrossCPU) + node.AddLink(4, 5, gpulib.P2PLinkSingleSwitch) + node.AddLink(4, 6, gpulib.P2PLinkHostBridge) + node.AddLink(4, 7, gpulib.P2PLinkHostBridge) + + node.AddLink(5, 0, gpulib.P2PLinkCrossCPU) + node.AddLink(5, 1, gpulib.P2PLinkCrossCPU) + node.AddLink(5, 2, gpulib.P2PLinkCrossCPU) + node.AddLink(5, 3, gpulib.P2PLinkCrossCPU) + node.AddLink(5, 4, gpulib.P2PLinkSingleSwitch) + node.AddLink(5, 6, gpulib.P2PLinkHostBridge) + node.AddLink(5, 7, gpulib.P2PLinkHostBridge) + + node.AddLink(6, 0, gpulib.P2PLinkCrossCPU) + node.AddLink(6, 1, gpulib.P2PLinkCrossCPU) + node.AddLink(6, 2, gpulib.P2PLinkCrossCPU) + node.AddLink(6, 3, gpulib.P2PLinkCrossCPU) + node.AddLink(6, 4, gpulib.P2PLinkHostBridge) + node.AddLink(6, 5, gpulib.P2PLinkHostBridge) + node.AddLink(6, 7, gpulib.P2PLinkSingleSwitch) + + node.AddLink(7, 0, gpulib.P2PLinkCrossCPU) + node.AddLink(7, 1, gpulib.P2PLinkCrossCPU) + node.AddLink(7, 2, gpulib.P2PLinkCrossCPU) + node.AddLink(7, 3, gpulib.P2PLinkCrossCPU) + node.AddLink(7, 4, gpulib.P2PLinkHostBridge) + node.AddLink(7, 5, gpulib.P2PLinkHostBridge) + node.AddLink(7, 6, gpulib.P2PLinkSingleSwitch) return node } diff --git a/gpuallocator/device.go b/gpuallocator/device.go index eecd7f2..d282a38 100644 --- a/gpuallocator/device.go +++ b/gpuallocator/device.go @@ -4,16 +4,17 @@ package gpuallocator import ( "fmt" + "log" "sort" "strings" - "github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml" + "github.com/NVIDIA/go-gpuallocator/internal/gpulib" ) -// Device represents a GPU device as reported by NVML, including all of its +// Device represents a GPU device as reported by gpulib, including all of its // Point-to-Point link information. type Device struct { - *nvml.Device + gpulib.DeviceLite Index int Links map[int][]P2PLink } @@ -23,45 +24,50 @@ type Device struct { // contained in the P2PLink struct itself. type P2PLink struct { GPU *Device - Type nvml.P2PLinkType + Type gpulib.P2PLinkType } // DeviceSet is used to hold and manipulate a set of unique GPU devices. 
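// For illustration only (not part of this change): entries are keyed internally by
// GPU UUID, and a set can be built from the devices returned by NewDevices. A
// minimal sketch, assuming enumeration succeeds and at least one GPU is present:
//
//	devices, _ := NewDevices()
//	set := NewDeviceSet(devices...)
//	if set.Contains(devices[0]) {
//		set.Delete(devices[0])
//	}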
type DeviceSet map[string]*Device -// NewDevices creates a list of Devices from all available nvml.Devices. +// NewDevices creates a list of Devices from all available gpulib.Devices. func NewDevices() ([]*Device, error) { - count, err := nvml.GetDeviceCount() - if err != nil { - return nil, fmt.Errorf("error calling nvml.GetDeviceCount: %v", err) + count, ret := gpulib.DeviceGetCount() + if ret.Value() != gpulib.SUCCESS { + return nil, fmt.Errorf("error calling gpulib.GetDeviceCount: %v", ret.Error()) } devices := []*Device{} - for i := 0; i < int(count); i++ { - device, err := nvml.NewDevice(uint(i)) - if err != nil { - return nil, fmt.Errorf("error creating nvml.Device %v: %v", i, err) + for i := 0; i < count; i++ { + gpulibDevice, ret := gpulib.NewDeviceLite(i) + if ret.Value() != gpulib.SUCCESS { + return nil, fmt.Errorf("error creating gpulib.Device %v: %v", i, ret.Error()) } - devices = append(devices, &Device{device, i, make(map[int][]P2PLink)}) + device := Device{ + DeviceLite: gpulibDevice, + Index: i, + Links: make(map[int][]P2PLink), + } + devices = append(devices, &device) } for i, d1 := range devices { for j, d2 := range devices { if d1 != d2 { - p2plink, err := nvml.GetP2PLink(d1.Device, d2.Device) + p2plink, err := gpulib.GetP2PLink(d1, d2) if err != nil { return nil, fmt.Errorf("error getting P2PLink for devices (%v, %v): %v", i, j, err) } - if p2plink != nvml.P2PLinkUnknown { + if p2plink != gpulib.P2PLinkUnknown { d1.Links[d2.Index] = append(d1.Links[d2.Index], P2PLink{d2, p2plink}) } - nvlink, err := nvml.GetNVLink(d1.Device, d2.Device) + nvlink, err := gpulib.GetNVLink(d1, d2) if err != nil { return nil, fmt.Errorf("error getting NVLink for devices (%v, %v): %v", i, j, err) } - if nvlink != nvml.P2PLinkUnknown { + if nvlink != gpulib.P2PLinkUnknown { d1.Links[d2.Index] = append(d1.Links[d2.Index], P2PLink{d2, nvlink}) } } @@ -79,14 +85,21 @@ func NewDevicesFrom(uuids []string) ([]*Device, error) { } filtered := []*Device{} + + found := make(map[string]bool) for _, uuid := range uuids { for _, device := range devices { - if device.UUID == uuid { + id, ret := device.GetUUID() + if ret.Value() != gpulib.SUCCESS { + return nil, fmt.Errorf("failed to get device UUID: %v", ret.Error()) + } + if id == uuid { filtered = append(filtered, device) + found[id] = true break } } - if len(filtered) == 0 || filtered[len(filtered)-1].UUID != uuid { + if len(filtered) == 0 || !found[uuid] { return nil, fmt.Errorf("no device with uuid: %v", uuid) } } @@ -101,11 +114,24 @@ func (d *Device) String() string { // Details returns all details of a Device as a multi-line string. 
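// As a rough illustration (hypothetical values, not taken from this diff), the
// string returned below has the shape:
//
//	Device 0:
//	  UUID: GPU-0
//	  PCI BusID: 00000000:3B:00.0
//	  SocketAffinity: 0
//	  Topology:
//	    GPU 1 Links:
//	      ...
//
// with, presumably, one line per P2P or NVLink connection recorded in d.Links.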
func (d *Device) Details() string { + uuid, ret := d.GetUUID() + if ret.Value() != gpulib.SUCCESS { + uuid = "UNKNOWN" + } + + var pciBusID string + pciInfo, ret := d.GetPciInfo() + if ret.Value() != gpulib.SUCCESS { + pciBusID = "UNKNOWN" + } else { + pciBusID = gpulib.NewPCIBusID(pciInfo).String() + } + s := "" s += fmt.Sprintf("Device %v:\n", d.Index) - s += fmt.Sprintf(" UUID: %v\n", d.UUID) - s += fmt.Sprintf(" PCI BusID: %v\n", d.PCI.BusID) - s += fmt.Sprintf(" SocketAffinity: %v\n", *d.CPUAffinity) + s += fmt.Sprintf(" UUID: %v\n", uuid) + s += fmt.Sprintf(" PCI BusID: %v\n", pciBusID) + s += fmt.Sprintf(" SocketAffinity: %v\n", d.CPUAffinity()) s += fmt.Sprintf(" Topology: \n") for gpu, links := range d.Links { s += fmt.Sprintf(" GPU %v Links:\n", gpu) @@ -117,6 +143,14 @@ func (d *Device) Details() string { return strings.TrimSuffix(s, "\n") } +func (d Device) uuidOrPanic() string { + uuid, ret := d.GetUUID() + if ret.Value() != gpulib.SUCCESS { + log.Panicf("could not get UUID for device: %v\n", ret.Error()) + } + return uuid +} + // NewDeviceSet creates a new DeviceSet. func NewDeviceSet(devices ...*Device) DeviceSet { set := make(DeviceSet) @@ -127,14 +161,16 @@ func NewDeviceSet(devices ...*Device) DeviceSet { // Insert inserts a list of devices into a DeviceSet. func (ds DeviceSet) Insert(devices ...*Device) { for _, device := range devices { - ds[device.UUID] = device + uuid := device.uuidOrPanic() + ds[uuid] = device } } // Delete deletes a list of devices from a DeviceSet. func (ds DeviceSet) Delete(devices ...*Device) { for _, device := range devices { - delete(ds, device.UUID) + uuid := device.uuidOrPanic() + delete(ds, uuid) } } @@ -144,7 +180,8 @@ func (ds DeviceSet) Contains(device *Device) bool { return false } - _, ok := ds[device.UUID] + uuid := device.uuidOrPanic() + _, ok := ds[uuid] return ok } diff --git a/internal/gpulib/consts.go b/internal/gpulib/consts.go new file mode 100644 index 0000000..1ae5dda --- /dev/null +++ b/internal/gpulib/consts.go @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package gpulib + +import ( + "github.com/NVIDIA/go-nvml/pkg/nvml" +) + +const ( + SUCCESS = nvml.SUCCESS + ERROR_UNINITIALIZED = nvml.ERROR_UNINITIALIZED + ERROR_INVALID_ARGUMENT = nvml.ERROR_INVALID_ARGUMENT + ERROR_NOT_SUPPORTED = nvml.ERROR_NOT_SUPPORTED + ERROR_NO_PERMISSION = nvml.ERROR_NO_PERMISSION + ERROR_ALREADY_INITIALIZED = nvml.ERROR_ALREADY_INITIALIZED + ERROR_NOT_FOUND = nvml.ERROR_NOT_FOUND + ERROR_INSUFFICIENT_SIZE = nvml.ERROR_INSUFFICIENT_SIZE + ERROR_INSUFFICIENT_POWER = nvml.ERROR_INSUFFICIENT_POWER + ERROR_DRIVER_NOT_LOADED = nvml.ERROR_DRIVER_NOT_LOADED + ERROR_TIMEOUT = nvml.ERROR_TIMEOUT + ERROR_IRQ_ISSUE = nvml.ERROR_IRQ_ISSUE + ERROR_LIBRARY_NOT_FOUND = nvml.ERROR_LIBRARY_NOT_FOUND + ERROR_FUNCTION_NOT_FOUND = nvml.ERROR_FUNCTION_NOT_FOUND + ERROR_CORRUPTED_INFOROM = nvml.ERROR_CORRUPTED_INFOROM + ERROR_GPU_IS_LOST = nvml.ERROR_GPU_IS_LOST + ERROR_RESET_REQUIRED = nvml.ERROR_RESET_REQUIRED + ERROR_OPERATING_SYSTEM = nvml.ERROR_OPERATING_SYSTEM + ERROR_LIB_RM_VERSION_MISMATCH = nvml.ERROR_LIB_RM_VERSION_MISMATCH + ERROR_IN_USE = nvml.ERROR_IN_USE + ERROR_MEMORY = nvml.ERROR_MEMORY + ERROR_NO_DATA = nvml.ERROR_NO_DATA + ERROR_VGPU_ECC_NOT_SUPPORTED = nvml.ERROR_VGPU_ECC_NOT_SUPPORTED + ERROR_INSUFFICIENT_RESOURCES = nvml.ERROR_INSUFFICIENT_RESOURCES + ERROR_UNKNOWN = nvml.ERROR_UNKNOWN +) + +const ( + DEVICE_MIG_ENABLE = nvml.DEVICE_MIG_ENABLE + DEVICE_MIG_DISABLE = nvml.DEVICE_MIG_DISABLE +) + +const ( + GPU_INSTANCE_PROFILE_1_SLICE = nvml.GPU_INSTANCE_PROFILE_1_SLICE + GPU_INSTANCE_PROFILE_2_SLICE = nvml.GPU_INSTANCE_PROFILE_2_SLICE + GPU_INSTANCE_PROFILE_3_SLICE = nvml.GPU_INSTANCE_PROFILE_3_SLICE + GPU_INSTANCE_PROFILE_4_SLICE = nvml.GPU_INSTANCE_PROFILE_4_SLICE + GPU_INSTANCE_PROFILE_7_SLICE = nvml.GPU_INSTANCE_PROFILE_7_SLICE + GPU_INSTANCE_PROFILE_8_SLICE = nvml.GPU_INSTANCE_PROFILE_8_SLICE + GPU_INSTANCE_PROFILE_COUNT = nvml.GPU_INSTANCE_PROFILE_COUNT +) + +const ( + COMPUTE_INSTANCE_PROFILE_1_SLICE = nvml.COMPUTE_INSTANCE_PROFILE_1_SLICE + COMPUTE_INSTANCE_PROFILE_2_SLICE = nvml.COMPUTE_INSTANCE_PROFILE_2_SLICE + COMPUTE_INSTANCE_PROFILE_3_SLICE = nvml.COMPUTE_INSTANCE_PROFILE_3_SLICE + COMPUTE_INSTANCE_PROFILE_4_SLICE = nvml.COMPUTE_INSTANCE_PROFILE_4_SLICE + COMPUTE_INSTANCE_PROFILE_7_SLICE = nvml.COMPUTE_INSTANCE_PROFILE_7_SLICE + COMPUTE_INSTANCE_PROFILE_8_SLICE = nvml.COMPUTE_INSTANCE_PROFILE_8_SLICE + COMPUTE_INSTANCE_PROFILE_COUNT = nvml.COMPUTE_INSTANCE_PROFILE_COUNT +) + +const ( + COMPUTE_INSTANCE_ENGINE_PROFILE_SHARED = nvml.COMPUTE_INSTANCE_ENGINE_PROFILE_SHARED + COMPUTE_INSTANCE_ENGINE_PROFILE_COUNT = nvml.COMPUTE_INSTANCE_ENGINE_PROFILE_COUNT +) + +const ( + TOPOLOGY_INTERNAL = nvml.TOPOLOGY_INTERNAL + TOPOLOGY_SINGLE = nvml.TOPOLOGY_SINGLE + TOPOLOGY_MULTIPLE = nvml.TOPOLOGY_MULTIPLE + TOPOLOGY_HOSTBRIDGE = nvml.TOPOLOGY_HOSTBRIDGE + TOPOLOGY_NODE = nvml.TOPOLOGY_NODE + TOPOLOGY_SYSTEM = nvml.TOPOLOGY_SYSTEM +) + +const ( + FEATURE_DISABLED = EnableState(nvml.FEATURE_DISABLED) + FEATURE_ENABLED = EnableState(nvml.FEATURE_ENABLED) +) +const ( + EventTypeXidCriticalError = nvml.EventTypeXidCriticalError +) diff --git a/internal/gpulib/device.go b/internal/gpulib/device.go new file mode 100644 index 0000000..b090fcc --- /dev/null +++ b/internal/gpulib/device.go @@ -0,0 +1,126 @@ +/** +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +package gpulib + +import ( + "gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvml" +) + +const ( + CPUAffinityNotSupported = -1 +) + +//go:generate moq -stub -out device_mock.go . Device +type Device interface { + GetAttributes() (DeviceAttributes, Return) + GetComputeInstanceId() (int, Return) + GetDeviceHandleFromMigDeviceHandle() (Device, Return) + GetGpuInstanceId() (int, Return) + GetMaxMigDeviceCount() (int, Return) + GetMigDeviceHandleByIndex(int) (Device, Return) + GetMigMode() (int, int, Return) + GetMinorNumber() (int, Return) + GetNvLinkRemotePciInfo(int) (PciInfo, Return) + GetNvLinkState(int) (EnableState, Return) + GetPciInfo() (PciInfo, Return) + GetTopologyCommonAncestor(Device) (GpuTopologyLevel, Return) + GetUUID() (string, Return) + RegisterEvents(uint64, EventSet) Return +} + +type nvmlDevice nvml.Device + +var _ Device = (*nvmlDevice)(nil) + +func DeviceGetHandleByIndex(index int) (Device, Return) { + d, ret := nvml.DeviceGetHandleByIndex(index) + return nvmlDevice(d), nvmlReturn(ret) +} + +func (d nvmlDevice) GetAttributes() (DeviceAttributes, Return) { + a1, ret := nvml.Device(d).GetAttributes() + return DeviceAttributes(a1), nvmlReturn(ret) +} + +func (d nvmlDevice) GetComputeInstanceId() (int, Return) { + i1, ret := nvml.Device(d).GetComputeInstanceId() + return i1, nvmlReturn(ret) +} + +func (d nvmlDevice) GetDeviceHandleFromMigDeviceHandle() (Device, Return) { + d1, ret := nvml.Device(d).GetDeviceHandleFromMigDeviceHandle() + return nvmlDevice(d1), nvmlReturn(ret) +} + +func (d nvmlDevice) GetGpuInstanceId() (int, Return) { + i1, ret := nvml.Device(d).GetGpuInstanceId() + return i1, nvmlReturn(ret) +} + +func (d nvmlDevice) GetMaxMigDeviceCount() (int, Return) { + s1, ret := nvml.Device(d).GetMaxMigDeviceCount() + return s1, nvmlReturn(ret) +} + +func (d nvmlDevice) GetMigDeviceHandleByIndex(index int) (Device, Return) { + h, ret := nvml.Device(d).GetMigDeviceHandleByIndex(index) + return nvmlDevice(h), nvmlReturn(ret) +} + +func (d nvmlDevice) GetMigMode() (int, int, Return) { + s1, s2, ret := nvml.Device(d).GetMigMode() + return s1, s2, nvmlReturn(ret) +} + +func (d nvmlDevice) GetMinorNumber() (int, Return) { + i1, ret := nvml.Device(d).GetMinorNumber() + return i1, nvmlReturn(ret) +} + +func (d nvmlDevice) GetNvLinkRemotePciInfo(link int) (PciInfo, Return) { + p1, ret := nvml.Device(d).GetNvLinkRemotePciInfo(link) + return PciInfo(p1), nvmlReturn(ret) +} + +func (d nvmlDevice) GetNvLinkState(link int) (EnableState, Return) { + e1, ret := nvml.Device(d).GetNvLinkState(link) + return EnableState(e1), nvmlReturn(ret) +} + +func (d nvmlDevice) GetPciInfo() (PciInfo, Return) { + p1, ret := nvml.Device(d).GetPciInfo() + return PciInfo(p1), nvmlReturn(ret) +} + +func (d nvmlDevice) GetTopologyCommonAncestor(other Device) (GpuTopologyLevel, Return) { + otherD, ok := other.(nvmlDevice) + if !ok { + return GpuTopologyLevel(TOPOLOGY_INTERNAL), nvmlReturn(ERROR_UNKNOWN) + } + l1, ret := nvml.Device(d).GetTopologyCommonAncestor(nvml.Device(otherD)) + return GpuTopologyLevel(l1), nvmlReturn(ret) +} + +func (d nvmlDevice) GetUUID() (string, Return) { + s1, ret 
:= nvml.Device(d).GetUUID() + return s1, nvmlReturn(ret) +} + +func (d nvmlDevice) RegisterEvents(EventTypes uint64, Set EventSet) Return { + ret := nvml.Device(d).RegisterEvents(EventTypes, nvml.EventSet(Set)) + return nvmlReturn(ret) +} diff --git a/internal/gpulib/device_lite.go b/internal/gpulib/device_lite.go new file mode 100644 index 0000000..cb7e3e2 --- /dev/null +++ b/internal/gpulib/device_lite.go @@ -0,0 +1,278 @@ +/** +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +package gpulib + +import ( + "bytes" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + + "gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvml" +) + +type DeviceLite interface { + Device + + IsMigEnabled() (bool, Return) + GetMigDevices() ([]DeviceLite, Return) + Path() string + CPUAffinity() int64 +} + +// TODO: These composite functions should return error instead of Return +type nvmlDeviceLite struct { + Device + + mutex sync.Mutex + + uuid string + minor int + pciInfo PciInfo + path string + numaNode *int64 +} + +var _ DeviceLite = (*nvmlDeviceLite)(nil) + +func NewDeviceLite(index int) (DeviceLite, Return) { + device, ret := DeviceGetHandleByIndex(index) + if ret.Value() != SUCCESS { + return nil, ret + } + + lite, ret := newDeviceLite(device) + if ret.Value() != SUCCESS { + return nil, ret + } + + return lite, nvmlReturn(SUCCESS) +} + +func (d *nvmlDeviceLite) GetMinorNumber() (int, Return) { + return d.minor, nvmlReturn(SUCCESS) +} + +func (d *nvmlDeviceLite) GetPciInfo() (PciInfo, Return) { + return d.pciInfo, nvmlReturn(SUCCESS) +} + +func (d *nvmlDeviceLite) GetUUID() (string, Return) { + return d.uuid, nvmlReturn(SUCCESS) +} + +// TODO: Should these composite functions return errors instead? 
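// For illustration only (not part of this change): the overrides above serve the
// UUID, minor number, and PCI info cached by newDeviceLite below, so they never
// call back into NVML. A minimal usage sketch from inside this package, assuming
// Init() has already returned SUCCESS:
//
//	if device, ret := NewDeviceLite(0); ret.Value() == SUCCESS {
//		uuid, _ := device.GetUUID() // served from the cached value
//		fmt.Printf("%s %s\n", device.Path(), uuid)
//	}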
+func newDeviceLite(device Device) (DeviceLite, Return) { + uuid, ret := device.GetUUID() + if ret.Value() != SUCCESS { + return nil, ret + } + minor, ret := device.GetMinorNumber() + if ret.Value() != SUCCESS { + return nil, ret + } + pciInfo, ret := device.GetPciInfo() + if ret.Value() != SUCCESS { + return nil, ret + } + + lite := nvmlDeviceLite{ + Device: device, + uuid: uuid, + minor: minor, + pciInfo: pciInfo, + } + + return &lite, nvmlReturn(SUCCESS) +} + +func (d *nvmlDeviceLite) Path() string { + if d.path == "" { + d.mutex.Lock() + defer d.mutex.Unlock() + d.path = fmt.Sprintf("/dev/nvidia%d", d.minor) + } + + return d.path +} + +func (d *nvmlDeviceLite) CPUAffinity() int64 { + if d.numaNode == nil { + busID := NewPCIBusID(d.pciInfo) + node, _ := busID.NumaNode() + + d.mutex.Lock() + defer d.mutex.Unlock() + d.numaNode = &node + } + + return *d.numaNode +} + +func (d *nvmlDeviceLite) IsMigEnabled() (bool, Return) { + cm, pm, ret := d.GetMigMode() + if ret.Value() == ERROR_NOT_SUPPORTED { + return false, nvmlReturn(SUCCESS) + } + if ret.Value() != SUCCESS { + return false, ret + } + + return (cm == nvml.DEVICE_MIG_ENABLE) && (cm == pm), nvmlReturn(SUCCESS) +} + +func (d *nvmlDeviceLite) GetMigDevices() ([]DeviceLite, Return) { + c, ret := d.GetMaxMigDeviceCount() + if ret.Value() != SUCCESS { + return nil, ret + } + + var migHandles []DeviceLite + for i := 0; i < int(c); i++ { + mig, ret := d.GetMigDeviceHandleByIndex(i) + if ret.Value() == ERROR_NOT_FOUND { + continue + } + if ret.Value() != SUCCESS { + return nil, ret + } + + migLite, ret := newDeviceLite(mig) + if ret.Value() != SUCCESS { + return nil, ret + } + migHandles = append(migHandles, migLite) + } + + return migHandles, nvmlReturn(SUCCESS) +} + +// PCIBusID is the ID on the PCI bus of a device +type PCIBusID string + +// NewPCIBusID provides a utility function that returns the string representation +// of the bus ID. 
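// For illustration only (not part of this change): NVML reports the bus ID as a
// fixed-size, NUL-padded byte array, typically of the form "00000000:3B:00.0".
// NewPCIBusID trims the padding; NumaNode (below) then lowercases the ID, drops
// the redundant leading "0000" of the PCI domain, and reads the node from sysfs,
// e.g.
//
//	/sys/bus/pci/devices/0000:3b:00.0/numa_node
//
// A missing file or a negative value is reported as CPUAffinityNotSupported, so
// callers such as CPUAffinity treat it as "no affinity" rather than an error.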
+func NewPCIBusID(p PciInfo) PCIBusID { + var bytes []byte + for _, b := range p.BusId { + if byte(b) == '\x00' { + break + } + bytes = append(bytes, byte(b)) + } + return PCIBusID(string(bytes)) +} + +func (p PCIBusID) String() string { + return string(p) +} + +func (p PCIBusID) NumaNode() (int64, error) { + b, err := os.ReadFile(p.numaNodePath()) + if err != nil { + return CPUAffinityNotSupported, nil + } + + node, err := strconv.ParseInt(string(bytes.TrimSpace(b)), 10, 8) + if err != nil { + return CPUAffinityNotSupported, fmt.Errorf("failed to parse numa_node contents: %v", err) + } + + if node < 0 { + return CPUAffinityNotSupported, nil + } + + return node, nil +} + +// numaNodePath returns the path for the numa_node file associated with the +// PCIBusID +func (p PCIBusID) numaNodePath() string { + id := strings.ToLower(p.String()) + + if strings.HasPrefix(id, "0000") { + id = id[4:] + } + return filepath.Join("/sys/bus/pci/devices", id, "numa_node") +} + +func ParseMigDeviceUUID(uuid string) (string, uint32, uint32, error) { + migDevice, ret := DeviceGetHandleByUUID(uuid) + if ret.Value() == SUCCESS { + parent, ret := migDevice.GetDeviceHandleFromMigDeviceHandle() + if ret.Value() != SUCCESS { + return "", 0, 0, fmt.Errorf("failed to get parent device handle: %v", ret.Error()) + } + parentUUID, ret := parent.GetUUID() + if ret.Value() != SUCCESS { + return "", 0, 0, fmt.Errorf("failed to get parent device UUID: %v", ret.Error()) + } + + gi, ret := migDevice.GetGpuInstanceId() + if ret.Value() != SUCCESS { + return "", 0, 0, fmt.Errorf("failed to get GPU instance ID: %v", ret.Error()) + } + ci, ret := migDevice.GetComputeInstanceId() + if ret.Value() != SUCCESS { + return "", 0, 0, fmt.Errorf("failed to get compute instance ID: %v", ret.Error()) + } + + return parentUUID, uint32(gi), uint32(ci), nil + } + + return parseMigDeviceUUID(uuid) +} + +func parseMigDeviceUUID(mig string) (string, uint32, uint32, error) { + tokens := strings.SplitN(mig, "-", 2) + if len(tokens) != 2 || tokens[0] != "MIG" { + return "", 0, 0, fmt.Errorf("failed to parse UUID as MIG device") + } + + tokens = strings.SplitN(tokens[1], "/", 3) + if len(tokens) != 3 || !strings.HasPrefix(tokens[0], "GPU-") { + return "", 0, 0, fmt.Errorf("failed to parse UUID as MIG device") + } + + gi, err := strconv.Atoi(tokens[1]) + if err != nil { + return "", 0, 0, fmt.Errorf("failed to parse UUID as MIG device") + } + + ci, err := strconv.Atoi(tokens[2]) + if err != nil { + return "", 0, 0, fmt.Errorf("failed to parse UUID as MIG device") + } + + return tokens[0], uint32(gi), uint32(ci), nil +} + +func (e EventData) GetUUID() (string, error) { + device := nvmlDevice(e.Device) + if device.Handle == nil { + return "", nil + } + uuid, ret := device.GetUUID() + if ret.Value() != SUCCESS { + return "", fmt.Errorf("failed to get UUID for device: %v", ret.Error()) + } + + return uuid, nil +} diff --git a/internal/gpulib/device_lite_mock.go b/internal/gpulib/device_lite_mock.go new file mode 100644 index 0000000..cb63f96 --- /dev/null +++ b/internal/gpulib/device_lite_mock.go @@ -0,0 +1,737 @@ +// Code generated by moq; DO NOT EDIT. +// github.com/matryer/moq + +package gpulib + +import ( + "sync" +) + +// Ensure, that DeviceLiteMock does implement DeviceLite. +// If this is not the case, regenerate this file with moq. +var _ DeviceLite = &DeviceLiteMock{} + +// DeviceLiteMock is a mock implementation of DeviceLite. 
+// +// func TestSomethingThatUsesDeviceLite(t *testing.T) { +// +// // make and configure a mocked DeviceLite +// mockedDeviceLite := &DeviceLiteMock{ +// CPUAffinityFunc: func() int64 { +// panic("mock out the CPUAffinity method") +// }, +// GetAttributesFunc: func() (DeviceAttributes, Return) { +// panic("mock out the GetAttributes method") +// }, +// GetComputeInstanceIdFunc: func() (int, Return) { +// panic("mock out the GetComputeInstanceId method") +// }, +// GetDeviceHandleFromMigDeviceHandleFunc: func() (Device, Return) { +// panic("mock out the GetDeviceHandleFromMigDeviceHandle method") +// }, +// GetGpuInstanceIdFunc: func() (int, Return) { +// panic("mock out the GetGpuInstanceId method") +// }, +// GetMaxMigDeviceCountFunc: func() (int, Return) { +// panic("mock out the GetMaxMigDeviceCount method") +// }, +// GetMigDeviceHandleByIndexFunc: func(n int) (Device, Return) { +// panic("mock out the GetMigDeviceHandleByIndex method") +// }, +// GetMigDevicesFunc: func() ([]DeviceLite, Return) { +// panic("mock out the GetMigDevices method") +// }, +// GetMigModeFunc: func() (int, int, Return) { +// panic("mock out the GetMigMode method") +// }, +// GetMinorNumberFunc: func() (int, Return) { +// panic("mock out the GetMinorNumber method") +// }, +// GetNvLinkRemotePciInfoFunc: func(n int) (PciInfo, Return) { +// panic("mock out the GetNvLinkRemotePciInfo method") +// }, +// GetNvLinkStateFunc: func(n int) (EnableState, Return) { +// panic("mock out the GetNvLinkState method") +// }, +// GetPciInfoFunc: func() (PciInfo, Return) { +// panic("mock out the GetPciInfo method") +// }, +// GetTopologyCommonAncestorFunc: func(device Device) (GpuTopologyLevel, Return) { +// panic("mock out the GetTopologyCommonAncestor method") +// }, +// GetUUIDFunc: func() (string, Return) { +// panic("mock out the GetUUID method") +// }, +// IsMigEnabledFunc: func() (bool, Return) { +// panic("mock out the IsMigEnabled method") +// }, +// PathFunc: func() string { +// panic("mock out the Path method") +// }, +// RegisterEventsFunc: func(v uint64, eventSet EventSet) Return { +// panic("mock out the RegisterEvents method") +// }, +// } +// +// // use mockedDeviceLite in code that requires DeviceLite +// // and then make assertions. +// +// } +type DeviceLiteMock struct { + // CPUAffinityFunc mocks the CPUAffinity method. + CPUAffinityFunc func() int64 + + // GetAttributesFunc mocks the GetAttributes method. + GetAttributesFunc func() (DeviceAttributes, Return) + + // GetComputeInstanceIdFunc mocks the GetComputeInstanceId method. + GetComputeInstanceIdFunc func() (int, Return) + + // GetDeviceHandleFromMigDeviceHandleFunc mocks the GetDeviceHandleFromMigDeviceHandle method. + GetDeviceHandleFromMigDeviceHandleFunc func() (Device, Return) + + // GetGpuInstanceIdFunc mocks the GetGpuInstanceId method. + GetGpuInstanceIdFunc func() (int, Return) + + // GetMaxMigDeviceCountFunc mocks the GetMaxMigDeviceCount method. + GetMaxMigDeviceCountFunc func() (int, Return) + + // GetMigDeviceHandleByIndexFunc mocks the GetMigDeviceHandleByIndex method. + GetMigDeviceHandleByIndexFunc func(n int) (Device, Return) + + // GetMigDevicesFunc mocks the GetMigDevices method. + GetMigDevicesFunc func() ([]DeviceLite, Return) + + // GetMigModeFunc mocks the GetMigMode method. + GetMigModeFunc func() (int, int, Return) + + // GetMinorNumberFunc mocks the GetMinorNumber method. + GetMinorNumberFunc func() (int, Return) + + // GetNvLinkRemotePciInfoFunc mocks the GetNvLinkRemotePciInfo method. 
+ GetNvLinkRemotePciInfoFunc func(n int) (PciInfo, Return) + + // GetNvLinkStateFunc mocks the GetNvLinkState method. + GetNvLinkStateFunc func(n int) (EnableState, Return) + + // GetPciInfoFunc mocks the GetPciInfo method. + GetPciInfoFunc func() (PciInfo, Return) + + // GetTopologyCommonAncestorFunc mocks the GetTopologyCommonAncestor method. + GetTopologyCommonAncestorFunc func(device Device) (GpuTopologyLevel, Return) + + // GetUUIDFunc mocks the GetUUID method. + GetUUIDFunc func() (string, Return) + + // IsMigEnabledFunc mocks the IsMigEnabled method. + IsMigEnabledFunc func() (bool, Return) + + // PathFunc mocks the Path method. + PathFunc func() string + + // RegisterEventsFunc mocks the RegisterEvents method. + RegisterEventsFunc func(v uint64, eventSet EventSet) Return + + // calls tracks calls to the methods. + calls struct { + // CPUAffinity holds details about calls to the CPUAffinity method. + CPUAffinity []struct { + } + // GetAttributes holds details about calls to the GetAttributes method. + GetAttributes []struct { + } + // GetComputeInstanceId holds details about calls to the GetComputeInstanceId method. + GetComputeInstanceId []struct { + } + // GetDeviceHandleFromMigDeviceHandle holds details about calls to the GetDeviceHandleFromMigDeviceHandle method. + GetDeviceHandleFromMigDeviceHandle []struct { + } + // GetGpuInstanceId holds details about calls to the GetGpuInstanceId method. + GetGpuInstanceId []struct { + } + // GetMaxMigDeviceCount holds details about calls to the GetMaxMigDeviceCount method. + GetMaxMigDeviceCount []struct { + } + // GetMigDeviceHandleByIndex holds details about calls to the GetMigDeviceHandleByIndex method. + GetMigDeviceHandleByIndex []struct { + // N is the n argument value. + N int + } + // GetMigDevices holds details about calls to the GetMigDevices method. + GetMigDevices []struct { + } + // GetMigMode holds details about calls to the GetMigMode method. + GetMigMode []struct { + } + // GetMinorNumber holds details about calls to the GetMinorNumber method. + GetMinorNumber []struct { + } + // GetNvLinkRemotePciInfo holds details about calls to the GetNvLinkRemotePciInfo method. + GetNvLinkRemotePciInfo []struct { + // N is the n argument value. + N int + } + // GetNvLinkState holds details about calls to the GetNvLinkState method. + GetNvLinkState []struct { + // N is the n argument value. + N int + } + // GetPciInfo holds details about calls to the GetPciInfo method. + GetPciInfo []struct { + } + // GetTopologyCommonAncestor holds details about calls to the GetTopologyCommonAncestor method. + GetTopologyCommonAncestor []struct { + // Device is the device argument value. + Device Device + } + // GetUUID holds details about calls to the GetUUID method. + GetUUID []struct { + } + // IsMigEnabled holds details about calls to the IsMigEnabled method. + IsMigEnabled []struct { + } + // Path holds details about calls to the Path method. + Path []struct { + } + // RegisterEvents holds details about calls to the RegisterEvents method. + RegisterEvents []struct { + // V is the v argument value. + V uint64 + // EventSet is the eventSet argument value. 
+ EventSet EventSet + } + } + lockCPUAffinity sync.RWMutex + lockGetAttributes sync.RWMutex + lockGetComputeInstanceId sync.RWMutex + lockGetDeviceHandleFromMigDeviceHandle sync.RWMutex + lockGetGpuInstanceId sync.RWMutex + lockGetMaxMigDeviceCount sync.RWMutex + lockGetMigDeviceHandleByIndex sync.RWMutex + lockGetMigDevices sync.RWMutex + lockGetMigMode sync.RWMutex + lockGetMinorNumber sync.RWMutex + lockGetNvLinkRemotePciInfo sync.RWMutex + lockGetNvLinkState sync.RWMutex + lockGetPciInfo sync.RWMutex + lockGetTopologyCommonAncestor sync.RWMutex + lockGetUUID sync.RWMutex + lockIsMigEnabled sync.RWMutex + lockPath sync.RWMutex + lockRegisterEvents sync.RWMutex +} + +// CPUAffinity calls CPUAffinityFunc. +func (mock *DeviceLiteMock) CPUAffinity() int64 { + if mock.CPUAffinityFunc == nil { + panic("DeviceLiteMock.CPUAffinityFunc: method is nil but DeviceLite.CPUAffinity was just called") + } + callInfo := struct { + }{} + mock.lockCPUAffinity.Lock() + mock.calls.CPUAffinity = append(mock.calls.CPUAffinity, callInfo) + mock.lockCPUAffinity.Unlock() + return mock.CPUAffinityFunc() +} + +// CPUAffinityCalls gets all the calls that were made to CPUAffinity. +// Check the length with: +// +// len(mockedDeviceLite.CPUAffinityCalls()) +func (mock *DeviceLiteMock) CPUAffinityCalls() []struct { +} { + var calls []struct { + } + mock.lockCPUAffinity.RLock() + calls = mock.calls.CPUAffinity + mock.lockCPUAffinity.RUnlock() + return calls +} + +// GetAttributes calls GetAttributesFunc. +func (mock *DeviceLiteMock) GetAttributes() (DeviceAttributes, Return) { + if mock.GetAttributesFunc == nil { + panic("DeviceLiteMock.GetAttributesFunc: method is nil but DeviceLite.GetAttributes was just called") + } + callInfo := struct { + }{} + mock.lockGetAttributes.Lock() + mock.calls.GetAttributes = append(mock.calls.GetAttributes, callInfo) + mock.lockGetAttributes.Unlock() + return mock.GetAttributesFunc() +} + +// GetAttributesCalls gets all the calls that were made to GetAttributes. +// Check the length with: +// +// len(mockedDeviceLite.GetAttributesCalls()) +func (mock *DeviceLiteMock) GetAttributesCalls() []struct { +} { + var calls []struct { + } + mock.lockGetAttributes.RLock() + calls = mock.calls.GetAttributes + mock.lockGetAttributes.RUnlock() + return calls +} + +// GetComputeInstanceId calls GetComputeInstanceIdFunc. +func (mock *DeviceLiteMock) GetComputeInstanceId() (int, Return) { + if mock.GetComputeInstanceIdFunc == nil { + panic("DeviceLiteMock.GetComputeInstanceIdFunc: method is nil but DeviceLite.GetComputeInstanceId was just called") + } + callInfo := struct { + }{} + mock.lockGetComputeInstanceId.Lock() + mock.calls.GetComputeInstanceId = append(mock.calls.GetComputeInstanceId, callInfo) + mock.lockGetComputeInstanceId.Unlock() + return mock.GetComputeInstanceIdFunc() +} + +// GetComputeInstanceIdCalls gets all the calls that were made to GetComputeInstanceId. +// Check the length with: +// +// len(mockedDeviceLite.GetComputeInstanceIdCalls()) +func (mock *DeviceLiteMock) GetComputeInstanceIdCalls() []struct { +} { + var calls []struct { + } + mock.lockGetComputeInstanceId.RLock() + calls = mock.calls.GetComputeInstanceId + mock.lockGetComputeInstanceId.RUnlock() + return calls +} + +// GetDeviceHandleFromMigDeviceHandle calls GetDeviceHandleFromMigDeviceHandleFunc. 
+func (mock *DeviceLiteMock) GetDeviceHandleFromMigDeviceHandle() (Device, Return) { + if mock.GetDeviceHandleFromMigDeviceHandleFunc == nil { + panic("DeviceLiteMock.GetDeviceHandleFromMigDeviceHandleFunc: method is nil but DeviceLite.GetDeviceHandleFromMigDeviceHandle was just called") + } + callInfo := struct { + }{} + mock.lockGetDeviceHandleFromMigDeviceHandle.Lock() + mock.calls.GetDeviceHandleFromMigDeviceHandle = append(mock.calls.GetDeviceHandleFromMigDeviceHandle, callInfo) + mock.lockGetDeviceHandleFromMigDeviceHandle.Unlock() + return mock.GetDeviceHandleFromMigDeviceHandleFunc() +} + +// GetDeviceHandleFromMigDeviceHandleCalls gets all the calls that were made to GetDeviceHandleFromMigDeviceHandle. +// Check the length with: +// +// len(mockedDeviceLite.GetDeviceHandleFromMigDeviceHandleCalls()) +func (mock *DeviceLiteMock) GetDeviceHandleFromMigDeviceHandleCalls() []struct { +} { + var calls []struct { + } + mock.lockGetDeviceHandleFromMigDeviceHandle.RLock() + calls = mock.calls.GetDeviceHandleFromMigDeviceHandle + mock.lockGetDeviceHandleFromMigDeviceHandle.RUnlock() + return calls +} + +// GetGpuInstanceId calls GetGpuInstanceIdFunc. +func (mock *DeviceLiteMock) GetGpuInstanceId() (int, Return) { + if mock.GetGpuInstanceIdFunc == nil { + panic("DeviceLiteMock.GetGpuInstanceIdFunc: method is nil but DeviceLite.GetGpuInstanceId was just called") + } + callInfo := struct { + }{} + mock.lockGetGpuInstanceId.Lock() + mock.calls.GetGpuInstanceId = append(mock.calls.GetGpuInstanceId, callInfo) + mock.lockGetGpuInstanceId.Unlock() + return mock.GetGpuInstanceIdFunc() +} + +// GetGpuInstanceIdCalls gets all the calls that were made to GetGpuInstanceId. +// Check the length with: +// +// len(mockedDeviceLite.GetGpuInstanceIdCalls()) +func (mock *DeviceLiteMock) GetGpuInstanceIdCalls() []struct { +} { + var calls []struct { + } + mock.lockGetGpuInstanceId.RLock() + calls = mock.calls.GetGpuInstanceId + mock.lockGetGpuInstanceId.RUnlock() + return calls +} + +// GetMaxMigDeviceCount calls GetMaxMigDeviceCountFunc. +func (mock *DeviceLiteMock) GetMaxMigDeviceCount() (int, Return) { + if mock.GetMaxMigDeviceCountFunc == nil { + panic("DeviceLiteMock.GetMaxMigDeviceCountFunc: method is nil but DeviceLite.GetMaxMigDeviceCount was just called") + } + callInfo := struct { + }{} + mock.lockGetMaxMigDeviceCount.Lock() + mock.calls.GetMaxMigDeviceCount = append(mock.calls.GetMaxMigDeviceCount, callInfo) + mock.lockGetMaxMigDeviceCount.Unlock() + return mock.GetMaxMigDeviceCountFunc() +} + +// GetMaxMigDeviceCountCalls gets all the calls that were made to GetMaxMigDeviceCount. +// Check the length with: +// +// len(mockedDeviceLite.GetMaxMigDeviceCountCalls()) +func (mock *DeviceLiteMock) GetMaxMigDeviceCountCalls() []struct { +} { + var calls []struct { + } + mock.lockGetMaxMigDeviceCount.RLock() + calls = mock.calls.GetMaxMigDeviceCount + mock.lockGetMaxMigDeviceCount.RUnlock() + return calls +} + +// GetMigDeviceHandleByIndex calls GetMigDeviceHandleByIndexFunc. 
+func (mock *DeviceLiteMock) GetMigDeviceHandleByIndex(n int) (Device, Return) { + if mock.GetMigDeviceHandleByIndexFunc == nil { + panic("DeviceLiteMock.GetMigDeviceHandleByIndexFunc: method is nil but DeviceLite.GetMigDeviceHandleByIndex was just called") + } + callInfo := struct { + N int + }{ + N: n, + } + mock.lockGetMigDeviceHandleByIndex.Lock() + mock.calls.GetMigDeviceHandleByIndex = append(mock.calls.GetMigDeviceHandleByIndex, callInfo) + mock.lockGetMigDeviceHandleByIndex.Unlock() + return mock.GetMigDeviceHandleByIndexFunc(n) +} + +// GetMigDeviceHandleByIndexCalls gets all the calls that were made to GetMigDeviceHandleByIndex. +// Check the length with: +// +// len(mockedDeviceLite.GetMigDeviceHandleByIndexCalls()) +func (mock *DeviceLiteMock) GetMigDeviceHandleByIndexCalls() []struct { + N int +} { + var calls []struct { + N int + } + mock.lockGetMigDeviceHandleByIndex.RLock() + calls = mock.calls.GetMigDeviceHandleByIndex + mock.lockGetMigDeviceHandleByIndex.RUnlock() + return calls +} + +// GetMigDevices calls GetMigDevicesFunc. +func (mock *DeviceLiteMock) GetMigDevices() ([]DeviceLite, Return) { + if mock.GetMigDevicesFunc == nil { + panic("DeviceLiteMock.GetMigDevicesFunc: method is nil but DeviceLite.GetMigDevices was just called") + } + callInfo := struct { + }{} + mock.lockGetMigDevices.Lock() + mock.calls.GetMigDevices = append(mock.calls.GetMigDevices, callInfo) + mock.lockGetMigDevices.Unlock() + return mock.GetMigDevicesFunc() +} + +// GetMigDevicesCalls gets all the calls that were made to GetMigDevices. +// Check the length with: +// +// len(mockedDeviceLite.GetMigDevicesCalls()) +func (mock *DeviceLiteMock) GetMigDevicesCalls() []struct { +} { + var calls []struct { + } + mock.lockGetMigDevices.RLock() + calls = mock.calls.GetMigDevices + mock.lockGetMigDevices.RUnlock() + return calls +} + +// GetMigMode calls GetMigModeFunc. +func (mock *DeviceLiteMock) GetMigMode() (int, int, Return) { + if mock.GetMigModeFunc == nil { + panic("DeviceLiteMock.GetMigModeFunc: method is nil but DeviceLite.GetMigMode was just called") + } + callInfo := struct { + }{} + mock.lockGetMigMode.Lock() + mock.calls.GetMigMode = append(mock.calls.GetMigMode, callInfo) + mock.lockGetMigMode.Unlock() + return mock.GetMigModeFunc() +} + +// GetMigModeCalls gets all the calls that were made to GetMigMode. +// Check the length with: +// +// len(mockedDeviceLite.GetMigModeCalls()) +func (mock *DeviceLiteMock) GetMigModeCalls() []struct { +} { + var calls []struct { + } + mock.lockGetMigMode.RLock() + calls = mock.calls.GetMigMode + mock.lockGetMigMode.RUnlock() + return calls +} + +// GetMinorNumber calls GetMinorNumberFunc. +func (mock *DeviceLiteMock) GetMinorNumber() (int, Return) { + if mock.GetMinorNumberFunc == nil { + panic("DeviceLiteMock.GetMinorNumberFunc: method is nil but DeviceLite.GetMinorNumber was just called") + } + callInfo := struct { + }{} + mock.lockGetMinorNumber.Lock() + mock.calls.GetMinorNumber = append(mock.calls.GetMinorNumber, callInfo) + mock.lockGetMinorNumber.Unlock() + return mock.GetMinorNumberFunc() +} + +// GetMinorNumberCalls gets all the calls that were made to GetMinorNumber. +// Check the length with: +// +// len(mockedDeviceLite.GetMinorNumberCalls()) +func (mock *DeviceLiteMock) GetMinorNumberCalls() []struct { +} { + var calls []struct { + } + mock.lockGetMinorNumber.RLock() + calls = mock.calls.GetMinorNumber + mock.lockGetMinorNumber.RUnlock() + return calls +} + +// GetNvLinkRemotePciInfo calls GetNvLinkRemotePciInfoFunc. 
+func (mock *DeviceLiteMock) GetNvLinkRemotePciInfo(n int) (PciInfo, Return) { + if mock.GetNvLinkRemotePciInfoFunc == nil { + panic("DeviceLiteMock.GetNvLinkRemotePciInfoFunc: method is nil but DeviceLite.GetNvLinkRemotePciInfo was just called") + } + callInfo := struct { + N int + }{ + N: n, + } + mock.lockGetNvLinkRemotePciInfo.Lock() + mock.calls.GetNvLinkRemotePciInfo = append(mock.calls.GetNvLinkRemotePciInfo, callInfo) + mock.lockGetNvLinkRemotePciInfo.Unlock() + return mock.GetNvLinkRemotePciInfoFunc(n) +} + +// GetNvLinkRemotePciInfoCalls gets all the calls that were made to GetNvLinkRemotePciInfo. +// Check the length with: +// +// len(mockedDeviceLite.GetNvLinkRemotePciInfoCalls()) +func (mock *DeviceLiteMock) GetNvLinkRemotePciInfoCalls() []struct { + N int +} { + var calls []struct { + N int + } + mock.lockGetNvLinkRemotePciInfo.RLock() + calls = mock.calls.GetNvLinkRemotePciInfo + mock.lockGetNvLinkRemotePciInfo.RUnlock() + return calls +} + +// GetNvLinkState calls GetNvLinkStateFunc. +func (mock *DeviceLiteMock) GetNvLinkState(n int) (EnableState, Return) { + if mock.GetNvLinkStateFunc == nil { + panic("DeviceLiteMock.GetNvLinkStateFunc: method is nil but DeviceLite.GetNvLinkState was just called") + } + callInfo := struct { + N int + }{ + N: n, + } + mock.lockGetNvLinkState.Lock() + mock.calls.GetNvLinkState = append(mock.calls.GetNvLinkState, callInfo) + mock.lockGetNvLinkState.Unlock() + return mock.GetNvLinkStateFunc(n) +} + +// GetNvLinkStateCalls gets all the calls that were made to GetNvLinkState. +// Check the length with: +// +// len(mockedDeviceLite.GetNvLinkStateCalls()) +func (mock *DeviceLiteMock) GetNvLinkStateCalls() []struct { + N int +} { + var calls []struct { + N int + } + mock.lockGetNvLinkState.RLock() + calls = mock.calls.GetNvLinkState + mock.lockGetNvLinkState.RUnlock() + return calls +} + +// GetPciInfo calls GetPciInfoFunc. +func (mock *DeviceLiteMock) GetPciInfo() (PciInfo, Return) { + if mock.GetPciInfoFunc == nil { + panic("DeviceLiteMock.GetPciInfoFunc: method is nil but DeviceLite.GetPciInfo was just called") + } + callInfo := struct { + }{} + mock.lockGetPciInfo.Lock() + mock.calls.GetPciInfo = append(mock.calls.GetPciInfo, callInfo) + mock.lockGetPciInfo.Unlock() + return mock.GetPciInfoFunc() +} + +// GetPciInfoCalls gets all the calls that were made to GetPciInfo. +// Check the length with: +// +// len(mockedDeviceLite.GetPciInfoCalls()) +func (mock *DeviceLiteMock) GetPciInfoCalls() []struct { +} { + var calls []struct { + } + mock.lockGetPciInfo.RLock() + calls = mock.calls.GetPciInfo + mock.lockGetPciInfo.RUnlock() + return calls +} + +// GetTopologyCommonAncestor calls GetTopologyCommonAncestorFunc. +func (mock *DeviceLiteMock) GetTopologyCommonAncestor(device Device) (GpuTopologyLevel, Return) { + if mock.GetTopologyCommonAncestorFunc == nil { + panic("DeviceLiteMock.GetTopologyCommonAncestorFunc: method is nil but DeviceLite.GetTopologyCommonAncestor was just called") + } + callInfo := struct { + Device Device + }{ + Device: device, + } + mock.lockGetTopologyCommonAncestor.Lock() + mock.calls.GetTopologyCommonAncestor = append(mock.calls.GetTopologyCommonAncestor, callInfo) + mock.lockGetTopologyCommonAncestor.Unlock() + return mock.GetTopologyCommonAncestorFunc(device) +} + +// GetTopologyCommonAncestorCalls gets all the calls that were made to GetTopologyCommonAncestor. 
+// Check the length with: +// +// len(mockedDeviceLite.GetTopologyCommonAncestorCalls()) +func (mock *DeviceLiteMock) GetTopologyCommonAncestorCalls() []struct { + Device Device +} { + var calls []struct { + Device Device + } + mock.lockGetTopologyCommonAncestor.RLock() + calls = mock.calls.GetTopologyCommonAncestor + mock.lockGetTopologyCommonAncestor.RUnlock() + return calls +} + +// GetUUID calls GetUUIDFunc. +func (mock *DeviceLiteMock) GetUUID() (string, Return) { + if mock.GetUUIDFunc == nil { + panic("DeviceLiteMock.GetUUIDFunc: method is nil but DeviceLite.GetUUID was just called") + } + callInfo := struct { + }{} + mock.lockGetUUID.Lock() + mock.calls.GetUUID = append(mock.calls.GetUUID, callInfo) + mock.lockGetUUID.Unlock() + return mock.GetUUIDFunc() +} + +// GetUUIDCalls gets all the calls that were made to GetUUID. +// Check the length with: +// +// len(mockedDeviceLite.GetUUIDCalls()) +func (mock *DeviceLiteMock) GetUUIDCalls() []struct { +} { + var calls []struct { + } + mock.lockGetUUID.RLock() + calls = mock.calls.GetUUID + mock.lockGetUUID.RUnlock() + return calls +} + +// IsMigEnabled calls IsMigEnabledFunc. +func (mock *DeviceLiteMock) IsMigEnabled() (bool, Return) { + if mock.IsMigEnabledFunc == nil { + panic("DeviceLiteMock.IsMigEnabledFunc: method is nil but DeviceLite.IsMigEnabled was just called") + } + callInfo := struct { + }{} + mock.lockIsMigEnabled.Lock() + mock.calls.IsMigEnabled = append(mock.calls.IsMigEnabled, callInfo) + mock.lockIsMigEnabled.Unlock() + return mock.IsMigEnabledFunc() +} + +// IsMigEnabledCalls gets all the calls that were made to IsMigEnabled. +// Check the length with: +// +// len(mockedDeviceLite.IsMigEnabledCalls()) +func (mock *DeviceLiteMock) IsMigEnabledCalls() []struct { +} { + var calls []struct { + } + mock.lockIsMigEnabled.RLock() + calls = mock.calls.IsMigEnabled + mock.lockIsMigEnabled.RUnlock() + return calls +} + +// Path calls PathFunc. +func (mock *DeviceLiteMock) Path() string { + if mock.PathFunc == nil { + panic("DeviceLiteMock.PathFunc: method is nil but DeviceLite.Path was just called") + } + callInfo := struct { + }{} + mock.lockPath.Lock() + mock.calls.Path = append(mock.calls.Path, callInfo) + mock.lockPath.Unlock() + return mock.PathFunc() +} + +// PathCalls gets all the calls that were made to Path. +// Check the length with: +// +// len(mockedDeviceLite.PathCalls()) +func (mock *DeviceLiteMock) PathCalls() []struct { +} { + var calls []struct { + } + mock.lockPath.RLock() + calls = mock.calls.Path + mock.lockPath.RUnlock() + return calls +} + +// RegisterEvents calls RegisterEventsFunc. +func (mock *DeviceLiteMock) RegisterEvents(v uint64, eventSet EventSet) Return { + if mock.RegisterEventsFunc == nil { + panic("DeviceLiteMock.RegisterEventsFunc: method is nil but DeviceLite.RegisterEvents was just called") + } + callInfo := struct { + V uint64 + EventSet EventSet + }{ + V: v, + EventSet: eventSet, + } + mock.lockRegisterEvents.Lock() + mock.calls.RegisterEvents = append(mock.calls.RegisterEvents, callInfo) + mock.lockRegisterEvents.Unlock() + return mock.RegisterEventsFunc(v, eventSet) +} + +// RegisterEventsCalls gets all the calls that were made to RegisterEvents. 
+// Check the length with: +// +// len(mockedDeviceLite.RegisterEventsCalls()) +func (mock *DeviceLiteMock) RegisterEventsCalls() []struct { + V uint64 + EventSet EventSet +} { + var calls []struct { + V uint64 + EventSet EventSet + } + mock.lockRegisterEvents.RLock() + calls = mock.calls.RegisterEvents + mock.lockRegisterEvents.RUnlock() + return calls +} diff --git a/internal/gpulib/device_mock.go b/internal/gpulib/device_mock.go new file mode 100644 index 0000000..9a6c226 --- /dev/null +++ b/internal/gpulib/device_mock.go @@ -0,0 +1,645 @@ +// Code generated by moq; DO NOT EDIT. +// github.com/matryer/moq + +package gpulib + +import ( + "sync" +) + +// Ensure, that DeviceMock does implement Device. +// If this is not the case, regenerate this file with moq. +var _ Device = &DeviceMock{} + +// DeviceMock is a mock implementation of Device. +// +// func TestSomethingThatUsesDevice(t *testing.T) { +// +// // make and configure a mocked Device +// mockedDevice := &DeviceMock{ +// GetAttributesFunc: func() (DeviceAttributes, Return) { +// panic("mock out the GetAttributes method") +// }, +// GetComputeInstanceIdFunc: func() (int, Return) { +// panic("mock out the GetComputeInstanceId method") +// }, +// GetDeviceHandleFromMigDeviceHandleFunc: func() (Device, Return) { +// panic("mock out the GetDeviceHandleFromMigDeviceHandle method") +// }, +// GetGpuInstanceIdFunc: func() (int, Return) { +// panic("mock out the GetGpuInstanceId method") +// }, +// GetMaxMigDeviceCountFunc: func() (int, Return) { +// panic("mock out the GetMaxMigDeviceCount method") +// }, +// GetMigDeviceHandleByIndexFunc: func(n int) (Device, Return) { +// panic("mock out the GetMigDeviceHandleByIndex method") +// }, +// GetMigModeFunc: func() (int, int, Return) { +// panic("mock out the GetMigMode method") +// }, +// GetMinorNumberFunc: func() (int, Return) { +// panic("mock out the GetMinorNumber method") +// }, +// GetNvLinkRemotePciInfoFunc: func(n int) (PciInfo, Return) { +// panic("mock out the GetNvLinkRemotePciInfo method") +// }, +// GetNvLinkStateFunc: func(n int) (EnableState, Return) { +// panic("mock out the GetNvLinkState method") +// }, +// GetPciInfoFunc: func() (PciInfo, Return) { +// panic("mock out the GetPciInfo method") +// }, +// GetTopologyCommonAncestorFunc: func(device Device) (GpuTopologyLevel, Return) { +// panic("mock out the GetTopologyCommonAncestor method") +// }, +// GetUUIDFunc: func() (string, Return) { +// panic("mock out the GetUUID method") +// }, +// RegisterEventsFunc: func(v uint64, eventSet EventSet) Return { +// panic("mock out the RegisterEvents method") +// }, +// } +// +// // use mockedDevice in code that requires Device +// // and then make assertions. +// +// } +type DeviceMock struct { + // GetAttributesFunc mocks the GetAttributes method. + GetAttributesFunc func() (DeviceAttributes, Return) + + // GetComputeInstanceIdFunc mocks the GetComputeInstanceId method. + GetComputeInstanceIdFunc func() (int, Return) + + // GetDeviceHandleFromMigDeviceHandleFunc mocks the GetDeviceHandleFromMigDeviceHandle method. + GetDeviceHandleFromMigDeviceHandleFunc func() (Device, Return) + + // GetGpuInstanceIdFunc mocks the GetGpuInstanceId method. + GetGpuInstanceIdFunc func() (int, Return) + + // GetMaxMigDeviceCountFunc mocks the GetMaxMigDeviceCount method. + GetMaxMigDeviceCountFunc func() (int, Return) + + // GetMigDeviceHandleByIndexFunc mocks the GetMigDeviceHandleByIndex method. 
+ GetMigDeviceHandleByIndexFunc func(n int) (Device, Return) + + // GetMigModeFunc mocks the GetMigMode method. + GetMigModeFunc func() (int, int, Return) + + // GetMinorNumberFunc mocks the GetMinorNumber method. + GetMinorNumberFunc func() (int, Return) + + // GetNvLinkRemotePciInfoFunc mocks the GetNvLinkRemotePciInfo method. + GetNvLinkRemotePciInfoFunc func(n int) (PciInfo, Return) + + // GetNvLinkStateFunc mocks the GetNvLinkState method. + GetNvLinkStateFunc func(n int) (EnableState, Return) + + // GetPciInfoFunc mocks the GetPciInfo method. + GetPciInfoFunc func() (PciInfo, Return) + + // GetTopologyCommonAncestorFunc mocks the GetTopologyCommonAncestor method. + GetTopologyCommonAncestorFunc func(device Device) (GpuTopologyLevel, Return) + + // GetUUIDFunc mocks the GetUUID method. + GetUUIDFunc func() (string, Return) + + // RegisterEventsFunc mocks the RegisterEvents method. + RegisterEventsFunc func(v uint64, eventSet EventSet) Return + + // calls tracks calls to the methods. + calls struct { + // GetAttributes holds details about calls to the GetAttributes method. + GetAttributes []struct { + } + // GetComputeInstanceId holds details about calls to the GetComputeInstanceId method. + GetComputeInstanceId []struct { + } + // GetDeviceHandleFromMigDeviceHandle holds details about calls to the GetDeviceHandleFromMigDeviceHandle method. + GetDeviceHandleFromMigDeviceHandle []struct { + } + // GetGpuInstanceId holds details about calls to the GetGpuInstanceId method. + GetGpuInstanceId []struct { + } + // GetMaxMigDeviceCount holds details about calls to the GetMaxMigDeviceCount method. + GetMaxMigDeviceCount []struct { + } + // GetMigDeviceHandleByIndex holds details about calls to the GetMigDeviceHandleByIndex method. + GetMigDeviceHandleByIndex []struct { + // N is the n argument value. + N int + } + // GetMigMode holds details about calls to the GetMigMode method. + GetMigMode []struct { + } + // GetMinorNumber holds details about calls to the GetMinorNumber method. + GetMinorNumber []struct { + } + // GetNvLinkRemotePciInfo holds details about calls to the GetNvLinkRemotePciInfo method. + GetNvLinkRemotePciInfo []struct { + // N is the n argument value. + N int + } + // GetNvLinkState holds details about calls to the GetNvLinkState method. + GetNvLinkState []struct { + // N is the n argument value. + N int + } + // GetPciInfo holds details about calls to the GetPciInfo method. + GetPciInfo []struct { + } + // GetTopologyCommonAncestor holds details about calls to the GetTopologyCommonAncestor method. + GetTopologyCommonAncestor []struct { + // Device is the device argument value. + Device Device + } + // GetUUID holds details about calls to the GetUUID method. + GetUUID []struct { + } + // RegisterEvents holds details about calls to the RegisterEvents method. + RegisterEvents []struct { + // V is the v argument value. + V uint64 + // EventSet is the eventSet argument value. + EventSet EventSet + } + } + lockGetAttributes sync.RWMutex + lockGetComputeInstanceId sync.RWMutex + lockGetDeviceHandleFromMigDeviceHandle sync.RWMutex + lockGetGpuInstanceId sync.RWMutex + lockGetMaxMigDeviceCount sync.RWMutex + lockGetMigDeviceHandleByIndex sync.RWMutex + lockGetMigMode sync.RWMutex + lockGetMinorNumber sync.RWMutex + lockGetNvLinkRemotePciInfo sync.RWMutex + lockGetNvLinkState sync.RWMutex + lockGetPciInfo sync.RWMutex + lockGetTopologyCommonAncestor sync.RWMutex + lockGetUUID sync.RWMutex + lockRegisterEvents sync.RWMutex +} + +// GetAttributes calls GetAttributesFunc. 
+func (mock *DeviceMock) GetAttributes() (DeviceAttributes, Return) { + callInfo := struct { + }{} + mock.lockGetAttributes.Lock() + mock.calls.GetAttributes = append(mock.calls.GetAttributes, callInfo) + mock.lockGetAttributes.Unlock() + if mock.GetAttributesFunc == nil { + var ( + deviceAttributesOut DeviceAttributes + returnOut Return + ) + return deviceAttributesOut, returnOut + } + return mock.GetAttributesFunc() +} + +// GetAttributesCalls gets all the calls that were made to GetAttributes. +// Check the length with: +// +// len(mockedDevice.GetAttributesCalls()) +func (mock *DeviceMock) GetAttributesCalls() []struct { +} { + var calls []struct { + } + mock.lockGetAttributes.RLock() + calls = mock.calls.GetAttributes + mock.lockGetAttributes.RUnlock() + return calls +} + +// GetComputeInstanceId calls GetComputeInstanceIdFunc. +func (mock *DeviceMock) GetComputeInstanceId() (int, Return) { + callInfo := struct { + }{} + mock.lockGetComputeInstanceId.Lock() + mock.calls.GetComputeInstanceId = append(mock.calls.GetComputeInstanceId, callInfo) + mock.lockGetComputeInstanceId.Unlock() + if mock.GetComputeInstanceIdFunc == nil { + var ( + nOut int + returnOut Return + ) + return nOut, returnOut + } + return mock.GetComputeInstanceIdFunc() +} + +// GetComputeInstanceIdCalls gets all the calls that were made to GetComputeInstanceId. +// Check the length with: +// +// len(mockedDevice.GetComputeInstanceIdCalls()) +func (mock *DeviceMock) GetComputeInstanceIdCalls() []struct { +} { + var calls []struct { + } + mock.lockGetComputeInstanceId.RLock() + calls = mock.calls.GetComputeInstanceId + mock.lockGetComputeInstanceId.RUnlock() + return calls +} + +// GetDeviceHandleFromMigDeviceHandle calls GetDeviceHandleFromMigDeviceHandleFunc. +func (mock *DeviceMock) GetDeviceHandleFromMigDeviceHandle() (Device, Return) { + callInfo := struct { + }{} + mock.lockGetDeviceHandleFromMigDeviceHandle.Lock() + mock.calls.GetDeviceHandleFromMigDeviceHandle = append(mock.calls.GetDeviceHandleFromMigDeviceHandle, callInfo) + mock.lockGetDeviceHandleFromMigDeviceHandle.Unlock() + if mock.GetDeviceHandleFromMigDeviceHandleFunc == nil { + var ( + deviceOut Device + returnOut Return + ) + return deviceOut, returnOut + } + return mock.GetDeviceHandleFromMigDeviceHandleFunc() +} + +// GetDeviceHandleFromMigDeviceHandleCalls gets all the calls that were made to GetDeviceHandleFromMigDeviceHandle. +// Check the length with: +// +// len(mockedDevice.GetDeviceHandleFromMigDeviceHandleCalls()) +func (mock *DeviceMock) GetDeviceHandleFromMigDeviceHandleCalls() []struct { +} { + var calls []struct { + } + mock.lockGetDeviceHandleFromMigDeviceHandle.RLock() + calls = mock.calls.GetDeviceHandleFromMigDeviceHandle + mock.lockGetDeviceHandleFromMigDeviceHandle.RUnlock() + return calls +} + +// GetGpuInstanceId calls GetGpuInstanceIdFunc. +func (mock *DeviceMock) GetGpuInstanceId() (int, Return) { + callInfo := struct { + }{} + mock.lockGetGpuInstanceId.Lock() + mock.calls.GetGpuInstanceId = append(mock.calls.GetGpuInstanceId, callInfo) + mock.lockGetGpuInstanceId.Unlock() + if mock.GetGpuInstanceIdFunc == nil { + var ( + nOut int + returnOut Return + ) + return nOut, returnOut + } + return mock.GetGpuInstanceIdFunc() +} + +// GetGpuInstanceIdCalls gets all the calls that were made to GetGpuInstanceId. 
+// Check the length with: +// +// len(mockedDevice.GetGpuInstanceIdCalls()) +func (mock *DeviceMock) GetGpuInstanceIdCalls() []struct { +} { + var calls []struct { + } + mock.lockGetGpuInstanceId.RLock() + calls = mock.calls.GetGpuInstanceId + mock.lockGetGpuInstanceId.RUnlock() + return calls +} + +// GetMaxMigDeviceCount calls GetMaxMigDeviceCountFunc. +func (mock *DeviceMock) GetMaxMigDeviceCount() (int, Return) { + callInfo := struct { + }{} + mock.lockGetMaxMigDeviceCount.Lock() + mock.calls.GetMaxMigDeviceCount = append(mock.calls.GetMaxMigDeviceCount, callInfo) + mock.lockGetMaxMigDeviceCount.Unlock() + if mock.GetMaxMigDeviceCountFunc == nil { + var ( + nOut int + returnOut Return + ) + return nOut, returnOut + } + return mock.GetMaxMigDeviceCountFunc() +} + +// GetMaxMigDeviceCountCalls gets all the calls that were made to GetMaxMigDeviceCount. +// Check the length with: +// +// len(mockedDevice.GetMaxMigDeviceCountCalls()) +func (mock *DeviceMock) GetMaxMigDeviceCountCalls() []struct { +} { + var calls []struct { + } + mock.lockGetMaxMigDeviceCount.RLock() + calls = mock.calls.GetMaxMigDeviceCount + mock.lockGetMaxMigDeviceCount.RUnlock() + return calls +} + +// GetMigDeviceHandleByIndex calls GetMigDeviceHandleByIndexFunc. +func (mock *DeviceMock) GetMigDeviceHandleByIndex(n int) (Device, Return) { + callInfo := struct { + N int + }{ + N: n, + } + mock.lockGetMigDeviceHandleByIndex.Lock() + mock.calls.GetMigDeviceHandleByIndex = append(mock.calls.GetMigDeviceHandleByIndex, callInfo) + mock.lockGetMigDeviceHandleByIndex.Unlock() + if mock.GetMigDeviceHandleByIndexFunc == nil { + var ( + deviceOut Device + returnOut Return + ) + return deviceOut, returnOut + } + return mock.GetMigDeviceHandleByIndexFunc(n) +} + +// GetMigDeviceHandleByIndexCalls gets all the calls that were made to GetMigDeviceHandleByIndex. +// Check the length with: +// +// len(mockedDevice.GetMigDeviceHandleByIndexCalls()) +func (mock *DeviceMock) GetMigDeviceHandleByIndexCalls() []struct { + N int +} { + var calls []struct { + N int + } + mock.lockGetMigDeviceHandleByIndex.RLock() + calls = mock.calls.GetMigDeviceHandleByIndex + mock.lockGetMigDeviceHandleByIndex.RUnlock() + return calls +} + +// GetMigMode calls GetMigModeFunc. +func (mock *DeviceMock) GetMigMode() (int, int, Return) { + callInfo := struct { + }{} + mock.lockGetMigMode.Lock() + mock.calls.GetMigMode = append(mock.calls.GetMigMode, callInfo) + mock.lockGetMigMode.Unlock() + if mock.GetMigModeFunc == nil { + var ( + nOut1 int + nOut2 int + returnOut Return + ) + return nOut1, nOut2, returnOut + } + return mock.GetMigModeFunc() +} + +// GetMigModeCalls gets all the calls that were made to GetMigMode. +// Check the length with: +// +// len(mockedDevice.GetMigModeCalls()) +func (mock *DeviceMock) GetMigModeCalls() []struct { +} { + var calls []struct { + } + mock.lockGetMigMode.RLock() + calls = mock.calls.GetMigMode + mock.lockGetMigMode.RUnlock() + return calls +} + +// GetMinorNumber calls GetMinorNumberFunc. +func (mock *DeviceMock) GetMinorNumber() (int, Return) { + callInfo := struct { + }{} + mock.lockGetMinorNumber.Lock() + mock.calls.GetMinorNumber = append(mock.calls.GetMinorNumber, callInfo) + mock.lockGetMinorNumber.Unlock() + if mock.GetMinorNumberFunc == nil { + var ( + nOut int + returnOut Return + ) + return nOut, returnOut + } + return mock.GetMinorNumberFunc() +} + +// GetMinorNumberCalls gets all the calls that were made to GetMinorNumber. 
+// Check the length with: +// +// len(mockedDevice.GetMinorNumberCalls()) +func (mock *DeviceMock) GetMinorNumberCalls() []struct { +} { + var calls []struct { + } + mock.lockGetMinorNumber.RLock() + calls = mock.calls.GetMinorNumber + mock.lockGetMinorNumber.RUnlock() + return calls +} + +// GetNvLinkRemotePciInfo calls GetNvLinkRemotePciInfoFunc. +func (mock *DeviceMock) GetNvLinkRemotePciInfo(n int) (PciInfo, Return) { + callInfo := struct { + N int + }{ + N: n, + } + mock.lockGetNvLinkRemotePciInfo.Lock() + mock.calls.GetNvLinkRemotePciInfo = append(mock.calls.GetNvLinkRemotePciInfo, callInfo) + mock.lockGetNvLinkRemotePciInfo.Unlock() + if mock.GetNvLinkRemotePciInfoFunc == nil { + var ( + pciInfoOut PciInfo + returnOut Return + ) + return pciInfoOut, returnOut + } + return mock.GetNvLinkRemotePciInfoFunc(n) +} + +// GetNvLinkRemotePciInfoCalls gets all the calls that were made to GetNvLinkRemotePciInfo. +// Check the length with: +// +// len(mockedDevice.GetNvLinkRemotePciInfoCalls()) +func (mock *DeviceMock) GetNvLinkRemotePciInfoCalls() []struct { + N int +} { + var calls []struct { + N int + } + mock.lockGetNvLinkRemotePciInfo.RLock() + calls = mock.calls.GetNvLinkRemotePciInfo + mock.lockGetNvLinkRemotePciInfo.RUnlock() + return calls +} + +// GetNvLinkState calls GetNvLinkStateFunc. +func (mock *DeviceMock) GetNvLinkState(n int) (EnableState, Return) { + callInfo := struct { + N int + }{ + N: n, + } + mock.lockGetNvLinkState.Lock() + mock.calls.GetNvLinkState = append(mock.calls.GetNvLinkState, callInfo) + mock.lockGetNvLinkState.Unlock() + if mock.GetNvLinkStateFunc == nil { + var ( + enableStateOut EnableState + returnOut Return + ) + return enableStateOut, returnOut + } + return mock.GetNvLinkStateFunc(n) +} + +// GetNvLinkStateCalls gets all the calls that were made to GetNvLinkState. +// Check the length with: +// +// len(mockedDevice.GetNvLinkStateCalls()) +func (mock *DeviceMock) GetNvLinkStateCalls() []struct { + N int +} { + var calls []struct { + N int + } + mock.lockGetNvLinkState.RLock() + calls = mock.calls.GetNvLinkState + mock.lockGetNvLinkState.RUnlock() + return calls +} + +// GetPciInfo calls GetPciInfoFunc. +func (mock *DeviceMock) GetPciInfo() (PciInfo, Return) { + callInfo := struct { + }{} + mock.lockGetPciInfo.Lock() + mock.calls.GetPciInfo = append(mock.calls.GetPciInfo, callInfo) + mock.lockGetPciInfo.Unlock() + if mock.GetPciInfoFunc == nil { + var ( + pciInfoOut PciInfo + returnOut Return + ) + return pciInfoOut, returnOut + } + return mock.GetPciInfoFunc() +} + +// GetPciInfoCalls gets all the calls that were made to GetPciInfo. +// Check the length with: +// +// len(mockedDevice.GetPciInfoCalls()) +func (mock *DeviceMock) GetPciInfoCalls() []struct { +} { + var calls []struct { + } + mock.lockGetPciInfo.RLock() + calls = mock.calls.GetPciInfo + mock.lockGetPciInfo.RUnlock() + return calls +} + +// GetTopologyCommonAncestor calls GetTopologyCommonAncestorFunc. 
+func (mock *DeviceMock) GetTopologyCommonAncestor(device Device) (GpuTopologyLevel, Return) { + callInfo := struct { + Device Device + }{ + Device: device, + } + mock.lockGetTopologyCommonAncestor.Lock() + mock.calls.GetTopologyCommonAncestor = append(mock.calls.GetTopologyCommonAncestor, callInfo) + mock.lockGetTopologyCommonAncestor.Unlock() + if mock.GetTopologyCommonAncestorFunc == nil { + var ( + gpuTopologyLevelOut GpuTopologyLevel + returnOut Return + ) + return gpuTopologyLevelOut, returnOut + } + return mock.GetTopologyCommonAncestorFunc(device) +} + +// GetTopologyCommonAncestorCalls gets all the calls that were made to GetTopologyCommonAncestor. +// Check the length with: +// +// len(mockedDevice.GetTopologyCommonAncestorCalls()) +func (mock *DeviceMock) GetTopologyCommonAncestorCalls() []struct { + Device Device +} { + var calls []struct { + Device Device + } + mock.lockGetTopologyCommonAncestor.RLock() + calls = mock.calls.GetTopologyCommonAncestor + mock.lockGetTopologyCommonAncestor.RUnlock() + return calls +} + +// GetUUID calls GetUUIDFunc. +func (mock *DeviceMock) GetUUID() (string, Return) { + callInfo := struct { + }{} + mock.lockGetUUID.Lock() + mock.calls.GetUUID = append(mock.calls.GetUUID, callInfo) + mock.lockGetUUID.Unlock() + if mock.GetUUIDFunc == nil { + var ( + sOut string + returnOut Return + ) + return sOut, returnOut + } + return mock.GetUUIDFunc() +} + +// GetUUIDCalls gets all the calls that were made to GetUUID. +// Check the length with: +// +// len(mockedDevice.GetUUIDCalls()) +func (mock *DeviceMock) GetUUIDCalls() []struct { +} { + var calls []struct { + } + mock.lockGetUUID.RLock() + calls = mock.calls.GetUUID + mock.lockGetUUID.RUnlock() + return calls +} + +// RegisterEvents calls RegisterEventsFunc. +func (mock *DeviceMock) RegisterEvents(v uint64, eventSet EventSet) Return { + callInfo := struct { + V uint64 + EventSet EventSet + }{ + V: v, + EventSet: eventSet, + } + mock.lockRegisterEvents.Lock() + mock.calls.RegisterEvents = append(mock.calls.RegisterEvents, callInfo) + mock.lockRegisterEvents.Unlock() + if mock.RegisterEventsFunc == nil { + var ( + returnOut Return + ) + return returnOut + } + return mock.RegisterEventsFunc(v, eventSet) +} + +// RegisterEventsCalls gets all the calls that were made to RegisterEvents. +// Check the length with: +// +// len(mockedDevice.RegisterEventsCalls()) +func (mock *DeviceMock) RegisterEventsCalls() []struct { + V uint64 + EventSet EventSet +} { + var calls []struct { + V uint64 + EventSet EventSet + } + mock.lockRegisterEvents.RLock() + calls = mock.calls.RegisterEvents + mock.lockRegisterEvents.RUnlock() + return calls +} diff --git a/internal/gpulib/events.go b/internal/gpulib/events.go new file mode 100644 index 0000000..9df9554 --- /dev/null +++ b/internal/gpulib/events.go @@ -0,0 +1,39 @@ +/** +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+**/ + +package gpulib + +import ( + "gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvml" +) + +type EventData nvml.EventData +type EventSet nvml.EventSet + +func EventSetCreate() (EventSet, Return) { + e1, ret := nvml.EventSetCreate() + return EventSet(e1), nvmlReturn(ret) +} + +func (e EventSet) Free() Return { + ret := nvml.EventSet(e).Free() + return nvmlReturn(ret) +} + +func (e EventSet) Wait(timeoutms uint32) (EventData, Return) { + d1, ret := nvml.EventSet(e).Wait(timeoutms) + return EventData(d1), nvmlReturn(ret) +} diff --git a/internal/gpulib/links.go b/internal/gpulib/links.go new file mode 100644 index 0000000..e4b72ec --- /dev/null +++ b/internal/gpulib/links.go @@ -0,0 +1,136 @@ +/** +# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +package gpulib + +import ( + "fmt" + + "gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvml" +) + +type P2PLinkType uint + +const ( + P2PLinkUnknown P2PLinkType = iota + P2PLinkCrossCPU + P2PLinkSameCPU + P2PLinkHostBridge + P2PLinkMultiSwitch + P2PLinkSingleSwitch + P2PLinkSameBoard + SingleNVLINKLink + TwoNVLINKLinks + ThreeNVLINKLinks + FourNVLINKLinks + FiveNVLINKLinks + SixNVLINKLinks + SevenNVLINKLinks + EightNVLINKLinks + NineNVLINKLinks + TenNVLINKLinks + ElevenNVLINKLinks + TwelveNVLINKLinks +) + +func GetP2PLink(dev1 Device, dev2 Device) (P2PLinkType, error) { + level, ret := dev1.GetTopologyCommonAncestor(dev2) + if ret.Value() != nvml.SUCCESS { + return P2PLinkUnknown, fmt.Errorf("failed to get common ancestor: %v", ret.Error()) + } + + var link P2PLinkType + + switch nvml.GpuTopologyLevel(level) { + case TOPOLOGY_INTERNAL: + link = P2PLinkSameBoard + case TOPOLOGY_SINGLE: + link = P2PLinkSingleSwitch + case TOPOLOGY_MULTIPLE: + link = P2PLinkMultiSwitch + case TOPOLOGY_HOSTBRIDGE: + link = P2PLinkHostBridge + case TOPOLOGY_NODE: // NOTE: TOPOLOGY_CPU was renamed TOPOLOGY_NODE + link = P2PLinkSameCPU + case TOPOLOGY_SYSTEM: + link = P2PLinkCrossCPU + default: + return P2PLinkUnknown, fmt.Errorf("unsupported topology level: %v", level) + } + + return link, nil +} + +func GetNVLink(dev1 Device, dev2 Device) (link P2PLinkType, err error) { + pciInfo2, ret := dev2.GetPciInfo() + if ret.Value() != nvml.SUCCESS { + return P2PLinkUnknown, fmt.Errorf("failed to get PciInfo: %v", ret.Error()) + } + + pciInfos, err := deviceGetAllNvLinkRemotePciInfo(dev1) + if err != nil { + return P2PLinkUnknown, err + } + + nvlink := P2PLinkUnknown + for _, pciInfo1 := range pciInfos { + if pciInfo1.BusId == pciInfo2.BusId { + nvlink = nvlink.add() + } + } + + // TODO(klueska): Handle NVSwitch semantics + + return nvlink, nil +} + +func (l P2PLinkType) add() P2PLinkType { + if l == P2PLinkUnknown { + return SingleNVLINKLink + } + if l == TwelveNVLINKLinks { + return TwelveNVLINKLinks + } + return l + 1 +} + +func deviceGetAllNvLinkRemotePciInfo(dev Device) ([]PciInfo, error) { + var pciInfos []PciInfo + for i := 0; i < nvml.NVLINK_MAX_LINKS; i++ { + state, ret := dev.GetNvLinkState(i) + if 
ret.Value() == ERROR_NOT_SUPPORTED || ret.Value() == ERROR_INVALID_ARGUMENT { + continue + } + if ret.Value() != SUCCESS { + return nil, fmt.Errorf("failed to query link %d state: %v", i, ret.Error()) + } + if state != FEATURE_ENABLED { + continue + } + + info, ret := dev.GetNvLinkRemotePciInfo(i) + if ret.Value() == nvml.ERROR_NOT_SUPPORTED || ret.Value() == nvml.ERROR_INVALID_ARGUMENT { + continue + } + if ret.Value() != nvml.SUCCESS { + return nil, fmt.Errorf("failed to query remote link %d: %v", i, ret.Error()) + } + pciInfos = append(pciInfos, info) + } + + return pciInfos, nil + +} diff --git a/internal/gpulib/return.go b/internal/gpulib/return.go new file mode 100644 index 0000000..5fcf417 --- /dev/null +++ b/internal/gpulib/return.go @@ -0,0 +1,43 @@ +/** +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +package gpulib + +import ( + "gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvml" +) + +type Return interface { + Value() nvml.Return + String() string + Error() string +} + +type nvmlReturn nvml.Return + +var _ Return = (*nvmlReturn)(nil) + +func (r nvmlReturn) Value() nvml.Return { + return nvml.Return(r) +} + +func (r nvmlReturn) String() string { + return r.Error() +} + +func (r nvmlReturn) Error() string { + return nvml.ErrorString(nvml.Return(r)) +} diff --git a/internal/gpulib/system.go b/internal/gpulib/system.go new file mode 100644 index 0000000..b9495ae --- /dev/null +++ b/internal/gpulib/system.go @@ -0,0 +1,39 @@ +/** +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +**/ + +package gpulib + +import ( + "gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvml" +) + +func Init() Return { + return nvmlReturn(nvml.Init()) +} + +func Shutdown() Return { + return nvmlReturn(nvml.Shutdown()) +} + +func DeviceGetCount() (int, Return) { + count, ret := nvml.DeviceGetCount() + return count, nvmlReturn(ret) +} + +func DeviceGetHandleByUUID(uuid string) (Device, Return) { + d1, ret := nvml.DeviceGetHandleByUUID(uuid) + return nvmlDevice(d1), nvmlReturn(ret) +} diff --git a/internal/gpulib/types.go b/internal/gpulib/types.go new file mode 100644 index 0000000..31ee886 --- /dev/null +++ b/internal/gpulib/types.go @@ -0,0 +1,26 @@ +/** +# Copyright (c) 2023, NVIDIA CORPORATION. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+**/
+
+package gpulib
+
+import (
+	"gitlab.com/nvidia/cloud-native/go-nvlib/pkg/nvml"
+)
+
+type DeviceAttributes nvml.DeviceAttributes
+type GpuTopologyLevel nvml.GpuTopologyLevel
+type EnableState nvml.EnableState
+type PciInfo nvml.PciInfo
diff --git a/vendor/github.com/NVIDIA/gpu-monitoring-tools/LICENSE b/vendor/github.com/NVIDIA/go-nvml/LICENSE
similarity index 100%
rename from vendor/github.com/NVIDIA/gpu-monitoring-tools/LICENSE
rename to vendor/github.com/NVIDIA/go-nvml/LICENSE
diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/dl/dl.go b/vendor/github.com/NVIDIA/go-nvml/pkg/dl/dl.go
new file mode 100644
index 0000000..21a0209
--- /dev/null
+++ b/vendor/github.com/NVIDIA/go-nvml/pkg/dl/dl.go
@@ -0,0 +1,82 @@
+// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package dl
+
+import (
+	"fmt"
+	"unsafe"
+)
+
+// #cgo LDFLAGS: -ldl
+// #include <dlfcn.h>
+// #include <stdlib.h>
+import "C"
+
+const (
+	RTLD_LAZY     = C.RTLD_LAZY
+	RTLD_NOW      = C.RTLD_NOW
+	RTLD_GLOBAL   = C.RTLD_GLOBAL
+	RTLD_LOCAL    = C.RTLD_LOCAL
+	RTLD_NODELETE = C.RTLD_NODELETE
+	RTLD_NOLOAD   = C.RTLD_NOLOAD
+	RTLD_DEEPBIND = C.RTLD_DEEPBIND
+)
+
+type DynamicLibrary struct {
+	Name   string
+	Flags  int
+	handle unsafe.Pointer
+}
+
+func New(name string, flags int) *DynamicLibrary {
+	return &DynamicLibrary{
+		Name:   name,
+		Flags:  flags,
+		handle: nil,
+	}
+}
+
+func (dl *DynamicLibrary) Open() error {
+	name := C.CString(dl.Name)
+	defer C.free(unsafe.Pointer(name))
+
+	handle := C.dlopen(name, C.int(dl.Flags))
+	if handle == C.NULL {
+		return fmt.Errorf("%s", C.GoString(C.dlerror()))
+	}
+	dl.handle = handle
+	return nil
+}
+
+func (dl *DynamicLibrary) Close() error {
+	err := C.dlclose(dl.handle)
+	if err != 0 {
+		return fmt.Errorf("%s", C.GoString(C.dlerror()))
+	}
+	return nil
+}
+
+func (dl *DynamicLibrary) Lookup(symbol string) error {
+	sym := C.CString(symbol)
+	defer C.free(unsafe.Pointer(sym))
+
+	C.dlerror() // Clear out any previous errors
+	C.dlsym(dl.handle, sym)
+	err := C.dlerror()
+	if unsafe.Pointer(err) == C.NULL {
+		return nil
+	}
+	return fmt.Errorf("%s", C.GoString(err))
+}
diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/cgo_helpers.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/cgo_helpers.go
new file mode 100644
index 0000000..b04f366
--- /dev/null
+++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/cgo_helpers.go
@@ -0,0 +1,64 @@
+// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package nvml
+
+import (
+	"unsafe"
+)
+
+import "C"
+
+var cgoAllocsUnknown = new(struct{})
+
+type stringHeader struct {
+	Data unsafe.Pointer
+	Len  int
+}
+
+func clen(n []byte) int {
+	for i := 0; i < len(n); i++ {
+		if n[i] == 0 {
+			return i
+		}
+	}
+	return len(n)
+}
+
+func uint32SliceToIntSlice(s []uint32) []int {
+	ret := make([]int, len(s))
+	for i := range s {
+		ret[i] = int(s[i])
+	}
+	return ret
+}
+
+// packPCharString creates a Go string backed by *C.char and avoids copying.
+func packPCharString(p *C.char) (raw string) {
+	if p != nil && *p != 0 {
+		h := (*stringHeader)(unsafe.Pointer(&raw))
+		h.Data = unsafe.Pointer(p)
+		for *p != 0 {
+			p = (*C.char)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + 1)) // p++
+		}
+		h.Len = int(uintptr(unsafe.Pointer(p)) - uintptr(h.Data))
+	}
+	return
+}
+
+// unpackPCharString represents the data from Go string as *C.char and avoids copying.
+func unpackPCharString(str string) (*C.char, *struct{}) {
+	h := (*stringHeader)(unsafe.Pointer(&str))
+	return (*C.char)(h.Data), cgoAllocsUnknown
+}
diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/cgo_helpers.h b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/cgo_helpers.h
new file mode 100644
index 0000000..b25c5e5
--- /dev/null
+++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/cgo_helpers.h
@@ -0,0 +1,23 @@
+// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// WARNING: THIS FILE WAS AUTOMATICALLY GENERATED.
+// Code generated by https://git.io/c-for-go. DO NOT EDIT.
+
+#include "nvml.h"
+#include <stdlib.h>
+#pragma once
+
+#define __CGOGEN 1
+
diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/const.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/const.go
new file mode 100644
index 0000000..1a0efaf
--- /dev/null
+++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/const.go
@@ -0,0 +1,1375 @@
+// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// WARNING: THIS FILE WAS AUTOMATICALLY GENERATED.
+// Code generated by https://git.io/c-for-go. DO NOT EDIT.
+ +package nvml + +/* +#cgo LDFLAGS: -Wl,--unresolved-symbols=ignore-in-object-files +#cgo CFLAGS: -DNVML_NO_UNVERSIONED_FUNC_DEFS=1 +#include "nvml.h" +#include +#include "cgo_helpers.h" +*/ +import "C" + +const ( + // NO_UNVERSIONED_FUNC_DEFS as defined in go-nvml/:24 + NO_UNVERSIONED_FUNC_DEFS = 1 + // API_VERSION as defined in nvml/nvml.h + API_VERSION = 11 + // API_VERSION_STR as defined in nvml/nvml.h + API_VERSION_STR = "11" + // VALUE_NOT_AVAILABLE as defined in nvml/nvml.h + VALUE_NOT_AVAILABLE = -1 + // DEVICE_PCI_BUS_ID_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_PCI_BUS_ID_BUFFER_SIZE = 32 + // DEVICE_PCI_BUS_ID_BUFFER_V2_SIZE as defined in nvml/nvml.h + DEVICE_PCI_BUS_ID_BUFFER_V2_SIZE = 16 + // DEVICE_PCI_BUS_ID_LEGACY_FMT as defined in nvml/nvml.h + DEVICE_PCI_BUS_ID_LEGACY_FMT = "%04X:%02X:%02X.0" + // DEVICE_PCI_BUS_ID_FMT as defined in nvml/nvml.h + DEVICE_PCI_BUS_ID_FMT = "%08X:%02X:%02X.0" + // NVLINK_MAX_LINKS as defined in nvml/nvml.h + NVLINK_MAX_LINKS = 18 + // TOPOLOGY_CPU as defined in nvml/nvml.h + TOPOLOGY_CPU = 0 + // MAX_PHYSICAL_BRIDGE as defined in nvml/nvml.h + MAX_PHYSICAL_BRIDGE = 128 + // MAX_THERMAL_SENSORS_PER_GPU as defined in nvml/nvml.h + MAX_THERMAL_SENSORS_PER_GPU = 3 + // FlagDefault as defined in nvml/nvml.h + FlagDefault = 0 + // FlagForce as defined in nvml/nvml.h + FlagForce = 1 + // SINGLE_BIT_ECC as defined in nvml/nvml.h + SINGLE_BIT_ECC = 0 + // DOUBLE_BIT_ECC as defined in nvml/nvml.h + DOUBLE_BIT_ECC = 0 + // MAX_GPU_PERF_PSTATES as defined in nvml/nvml.h + MAX_GPU_PERF_PSTATES = 16 + // GRID_LICENSE_EXPIRY_NOT_AVAILABLE as defined in nvml/nvml.h + GRID_LICENSE_EXPIRY_NOT_AVAILABLE = 0 + // GRID_LICENSE_EXPIRY_INVALID as defined in nvml/nvml.h + GRID_LICENSE_EXPIRY_INVALID = 1 + // GRID_LICENSE_EXPIRY_VALID as defined in nvml/nvml.h + GRID_LICENSE_EXPIRY_VALID = 2 + // GRID_LICENSE_EXPIRY_NOT_APPLICABLE as defined in nvml/nvml.h + GRID_LICENSE_EXPIRY_NOT_APPLICABLE = 3 + // GRID_LICENSE_EXPIRY_PERMANENT as defined in nvml/nvml.h + GRID_LICENSE_EXPIRY_PERMANENT = 4 + // GRID_LICENSE_BUFFER_SIZE as defined in nvml/nvml.h + GRID_LICENSE_BUFFER_SIZE = 128 + // VGPU_NAME_BUFFER_SIZE as defined in nvml/nvml.h + VGPU_NAME_BUFFER_SIZE = 64 + // GRID_LICENSE_FEATURE_MAX_COUNT as defined in nvml/nvml.h + GRID_LICENSE_FEATURE_MAX_COUNT = 3 + // VGPU_SCHEDULER_POLICY_UNKNOWN as defined in nvml/nvml.h + VGPU_SCHEDULER_POLICY_UNKNOWN = 0 + // VGPU_SCHEDULER_POLICY_BEST_EFFORT as defined in nvml/nvml.h + VGPU_SCHEDULER_POLICY_BEST_EFFORT = 1 + // VGPU_SCHEDULER_POLICY_EQUAL_SHARE as defined in nvml/nvml.h + VGPU_SCHEDULER_POLICY_EQUAL_SHARE = 2 + // VGPU_SCHEDULER_POLICY_FIXED_SHARE as defined in nvml/nvml.h + VGPU_SCHEDULER_POLICY_FIXED_SHARE = 3 + // SUPPORTED_VGPU_SCHEDULER_POLICY_COUNT as defined in nvml/nvml.h + SUPPORTED_VGPU_SCHEDULER_POLICY_COUNT = 3 + // SCHEDULER_SW_MAX_LOG_ENTRIES as defined in nvml/nvml.h + SCHEDULER_SW_MAX_LOG_ENTRIES = 200 + // GRID_LICENSE_STATE_UNKNOWN as defined in nvml/nvml.h + GRID_LICENSE_STATE_UNKNOWN = 0 + // GRID_LICENSE_STATE_UNINITIALIZED as defined in nvml/nvml.h + GRID_LICENSE_STATE_UNINITIALIZED = 1 + // GRID_LICENSE_STATE_UNLICENSED_UNRESTRICTED as defined in nvml/nvml.h + GRID_LICENSE_STATE_UNLICENSED_UNRESTRICTED = 2 + // GRID_LICENSE_STATE_UNLICENSED_RESTRICTED as defined in nvml/nvml.h + GRID_LICENSE_STATE_UNLICENSED_RESTRICTED = 3 + // GRID_LICENSE_STATE_UNLICENSED as defined in nvml/nvml.h + GRID_LICENSE_STATE_UNLICENSED = 4 + // GRID_LICENSE_STATE_LICENSED as defined in nvml/nvml.h + 
GRID_LICENSE_STATE_LICENSED = 5 + // GSP_FIRMWARE_VERSION_BUF_SIZE as defined in nvml/nvml.h + GSP_FIRMWARE_VERSION_BUF_SIZE = 64 + // DEVICE_ARCH_KEPLER as defined in nvml/nvml.h + DEVICE_ARCH_KEPLER = 2 + // DEVICE_ARCH_MAXWELL as defined in nvml/nvml.h + DEVICE_ARCH_MAXWELL = 3 + // DEVICE_ARCH_PASCAL as defined in nvml/nvml.h + DEVICE_ARCH_PASCAL = 4 + // DEVICE_ARCH_VOLTA as defined in nvml/nvml.h + DEVICE_ARCH_VOLTA = 5 + // DEVICE_ARCH_TURING as defined in nvml/nvml.h + DEVICE_ARCH_TURING = 6 + // DEVICE_ARCH_AMPERE as defined in nvml/nvml.h + DEVICE_ARCH_AMPERE = 7 + // DEVICE_ARCH_ADA as defined in nvml/nvml.h + DEVICE_ARCH_ADA = 8 + // DEVICE_ARCH_HOPPER as defined in nvml/nvml.h + DEVICE_ARCH_HOPPER = 9 + // DEVICE_ARCH_UNKNOWN as defined in nvml/nvml.h + DEVICE_ARCH_UNKNOWN = 4294967295 + // BUS_TYPE_UNKNOWN as defined in nvml/nvml.h + BUS_TYPE_UNKNOWN = 0 + // BUS_TYPE_PCI as defined in nvml/nvml.h + BUS_TYPE_PCI = 1 + // BUS_TYPE_PCIE as defined in nvml/nvml.h + BUS_TYPE_PCIE = 2 + // BUS_TYPE_FPCI as defined in nvml/nvml.h + BUS_TYPE_FPCI = 3 + // BUS_TYPE_AGP as defined in nvml/nvml.h + BUS_TYPE_AGP = 4 + // FAN_POLICY_TEMPERATURE_CONTINOUS_SW as defined in nvml/nvml.h + FAN_POLICY_TEMPERATURE_CONTINOUS_SW = 0 + // FAN_POLICY_MANUAL as defined in nvml/nvml.h + FAN_POLICY_MANUAL = 1 + // POWER_SOURCE_AC as defined in nvml/nvml.h + POWER_SOURCE_AC = 0 + // POWER_SOURCE_BATTERY as defined in nvml/nvml.h + POWER_SOURCE_BATTERY = 1 + // PCIE_LINK_MAX_SPEED_INVALID as defined in nvml/nvml.h + PCIE_LINK_MAX_SPEED_INVALID = 0 + // PCIE_LINK_MAX_SPEED_2500MBPS as defined in nvml/nvml.h + PCIE_LINK_MAX_SPEED_2500MBPS = 1 + // PCIE_LINK_MAX_SPEED_5000MBPS as defined in nvml/nvml.h + PCIE_LINK_MAX_SPEED_5000MBPS = 2 + // PCIE_LINK_MAX_SPEED_8000MBPS as defined in nvml/nvml.h + PCIE_LINK_MAX_SPEED_8000MBPS = 3 + // PCIE_LINK_MAX_SPEED_16000MBPS as defined in nvml/nvml.h + PCIE_LINK_MAX_SPEED_16000MBPS = 4 + // PCIE_LINK_MAX_SPEED_32000MBPS as defined in nvml/nvml.h + PCIE_LINK_MAX_SPEED_32000MBPS = 5 + // PCIE_LINK_MAX_SPEED_64000MBPS as defined in nvml/nvml.h + PCIE_LINK_MAX_SPEED_64000MBPS = 6 + // ADAPTIVE_CLOCKING_INFO_STATUS_DISABLED as defined in nvml/nvml.h + ADAPTIVE_CLOCKING_INFO_STATUS_DISABLED = 0 + // ADAPTIVE_CLOCKING_INFO_STATUS_ENABLED as defined in nvml/nvml.h + ADAPTIVE_CLOCKING_INFO_STATUS_ENABLED = 1 + // MAX_GPU_UTILIZATIONS as defined in nvml/nvml.h + MAX_GPU_UTILIZATIONS = 8 + // FI_DEV_ECC_CURRENT as defined in nvml/nvml.h + FI_DEV_ECC_CURRENT = 1 + // FI_DEV_ECC_PENDING as defined in nvml/nvml.h + FI_DEV_ECC_PENDING = 2 + // FI_DEV_ECC_SBE_VOL_TOTAL as defined in nvml/nvml.h + FI_DEV_ECC_SBE_VOL_TOTAL = 3 + // FI_DEV_ECC_DBE_VOL_TOTAL as defined in nvml/nvml.h + FI_DEV_ECC_DBE_VOL_TOTAL = 4 + // FI_DEV_ECC_SBE_AGG_TOTAL as defined in nvml/nvml.h + FI_DEV_ECC_SBE_AGG_TOTAL = 5 + // FI_DEV_ECC_DBE_AGG_TOTAL as defined in nvml/nvml.h + FI_DEV_ECC_DBE_AGG_TOTAL = 6 + // FI_DEV_ECC_SBE_VOL_L1 as defined in nvml/nvml.h + FI_DEV_ECC_SBE_VOL_L1 = 7 + // FI_DEV_ECC_DBE_VOL_L1 as defined in nvml/nvml.h + FI_DEV_ECC_DBE_VOL_L1 = 8 + // FI_DEV_ECC_SBE_VOL_L2 as defined in nvml/nvml.h + FI_DEV_ECC_SBE_VOL_L2 = 9 + // FI_DEV_ECC_DBE_VOL_L2 as defined in nvml/nvml.h + FI_DEV_ECC_DBE_VOL_L2 = 10 + // FI_DEV_ECC_SBE_VOL_DEV as defined in nvml/nvml.h + FI_DEV_ECC_SBE_VOL_DEV = 11 + // FI_DEV_ECC_DBE_VOL_DEV as defined in nvml/nvml.h + FI_DEV_ECC_DBE_VOL_DEV = 12 + // FI_DEV_ECC_SBE_VOL_REG as defined in nvml/nvml.h + FI_DEV_ECC_SBE_VOL_REG = 13 + // FI_DEV_ECC_DBE_VOL_REG as 
defined in nvml/nvml.h + FI_DEV_ECC_DBE_VOL_REG = 14 + // FI_DEV_ECC_SBE_VOL_TEX as defined in nvml/nvml.h + FI_DEV_ECC_SBE_VOL_TEX = 15 + // FI_DEV_ECC_DBE_VOL_TEX as defined in nvml/nvml.h + FI_DEV_ECC_DBE_VOL_TEX = 16 + // FI_DEV_ECC_DBE_VOL_CBU as defined in nvml/nvml.h + FI_DEV_ECC_DBE_VOL_CBU = 17 + // FI_DEV_ECC_SBE_AGG_L1 as defined in nvml/nvml.h + FI_DEV_ECC_SBE_AGG_L1 = 18 + // FI_DEV_ECC_DBE_AGG_L1 as defined in nvml/nvml.h + FI_DEV_ECC_DBE_AGG_L1 = 19 + // FI_DEV_ECC_SBE_AGG_L2 as defined in nvml/nvml.h + FI_DEV_ECC_SBE_AGG_L2 = 20 + // FI_DEV_ECC_DBE_AGG_L2 as defined in nvml/nvml.h + FI_DEV_ECC_DBE_AGG_L2 = 21 + // FI_DEV_ECC_SBE_AGG_DEV as defined in nvml/nvml.h + FI_DEV_ECC_SBE_AGG_DEV = 22 + // FI_DEV_ECC_DBE_AGG_DEV as defined in nvml/nvml.h + FI_DEV_ECC_DBE_AGG_DEV = 23 + // FI_DEV_ECC_SBE_AGG_REG as defined in nvml/nvml.h + FI_DEV_ECC_SBE_AGG_REG = 24 + // FI_DEV_ECC_DBE_AGG_REG as defined in nvml/nvml.h + FI_DEV_ECC_DBE_AGG_REG = 25 + // FI_DEV_ECC_SBE_AGG_TEX as defined in nvml/nvml.h + FI_DEV_ECC_SBE_AGG_TEX = 26 + // FI_DEV_ECC_DBE_AGG_TEX as defined in nvml/nvml.h + FI_DEV_ECC_DBE_AGG_TEX = 27 + // FI_DEV_ECC_DBE_AGG_CBU as defined in nvml/nvml.h + FI_DEV_ECC_DBE_AGG_CBU = 28 + // FI_DEV_RETIRED_SBE as defined in nvml/nvml.h + FI_DEV_RETIRED_SBE = 29 + // FI_DEV_RETIRED_DBE as defined in nvml/nvml.h + FI_DEV_RETIRED_DBE = 30 + // FI_DEV_RETIRED_PENDING as defined in nvml/nvml.h + FI_DEV_RETIRED_PENDING = 31 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L0 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L0 = 32 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L1 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L1 = 33 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L2 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L2 = 34 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L3 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L3 = 35 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L4 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L4 = 36 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L5 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L5 = 37 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_TOTAL = 38 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L0 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L0 = 39 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L1 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L1 = 40 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L2 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L2 = 41 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L3 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L3 = 42 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L4 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L4 = 43 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L5 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L5 = 44 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_TOTAL as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_TOTAL = 45 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L0 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L0 = 46 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L1 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L1 = 47 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L2 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L2 = 48 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L3 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L3 = 49 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L4 as 
defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L4 = 50 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L5 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L5 = 51 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_TOTAL as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_TOTAL = 52 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L0 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L0 = 53 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L1 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L1 = 54 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L2 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L2 = 55 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L3 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L3 = 56 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L4 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L4 = 57 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L5 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L5 = 58 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_TOTAL as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_TOTAL = 59 + // FI_DEV_NVLINK_BANDWIDTH_C0_L0 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L0 = 60 + // FI_DEV_NVLINK_BANDWIDTH_C0_L1 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L1 = 61 + // FI_DEV_NVLINK_BANDWIDTH_C0_L2 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L2 = 62 + // FI_DEV_NVLINK_BANDWIDTH_C0_L3 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L3 = 63 + // FI_DEV_NVLINK_BANDWIDTH_C0_L4 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L4 = 64 + // FI_DEV_NVLINK_BANDWIDTH_C0_L5 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L5 = 65 + // FI_DEV_NVLINK_BANDWIDTH_C0_TOTAL as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_TOTAL = 66 + // FI_DEV_NVLINK_BANDWIDTH_C1_L0 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L0 = 67 + // FI_DEV_NVLINK_BANDWIDTH_C1_L1 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L1 = 68 + // FI_DEV_NVLINK_BANDWIDTH_C1_L2 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L2 = 69 + // FI_DEV_NVLINK_BANDWIDTH_C1_L3 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L3 = 70 + // FI_DEV_NVLINK_BANDWIDTH_C1_L4 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L4 = 71 + // FI_DEV_NVLINK_BANDWIDTH_C1_L5 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L5 = 72 + // FI_DEV_NVLINK_BANDWIDTH_C1_TOTAL as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_TOTAL = 73 + // FI_DEV_PERF_POLICY_POWER as defined in nvml/nvml.h + FI_DEV_PERF_POLICY_POWER = 74 + // FI_DEV_PERF_POLICY_THERMAL as defined in nvml/nvml.h + FI_DEV_PERF_POLICY_THERMAL = 75 + // FI_DEV_PERF_POLICY_SYNC_BOOST as defined in nvml/nvml.h + FI_DEV_PERF_POLICY_SYNC_BOOST = 76 + // FI_DEV_PERF_POLICY_BOARD_LIMIT as defined in nvml/nvml.h + FI_DEV_PERF_POLICY_BOARD_LIMIT = 77 + // FI_DEV_PERF_POLICY_LOW_UTILIZATION as defined in nvml/nvml.h + FI_DEV_PERF_POLICY_LOW_UTILIZATION = 78 + // FI_DEV_PERF_POLICY_RELIABILITY as defined in nvml/nvml.h + FI_DEV_PERF_POLICY_RELIABILITY = 79 + // FI_DEV_PERF_POLICY_TOTAL_APP_CLOCKS as defined in nvml/nvml.h + FI_DEV_PERF_POLICY_TOTAL_APP_CLOCKS = 80 + // FI_DEV_PERF_POLICY_TOTAL_BASE_CLOCKS as defined in nvml/nvml.h + FI_DEV_PERF_POLICY_TOTAL_BASE_CLOCKS = 81 + // FI_DEV_MEMORY_TEMP as defined in nvml/nvml.h + FI_DEV_MEMORY_TEMP = 82 + // FI_DEV_TOTAL_ENERGY_CONSUMPTION as defined in nvml/nvml.h + FI_DEV_TOTAL_ENERGY_CONSUMPTION = 83 + // FI_DEV_NVLINK_SPEED_MBPS_L0 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L0 
= 84 + // FI_DEV_NVLINK_SPEED_MBPS_L1 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L1 = 85 + // FI_DEV_NVLINK_SPEED_MBPS_L2 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L2 = 86 + // FI_DEV_NVLINK_SPEED_MBPS_L3 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L3 = 87 + // FI_DEV_NVLINK_SPEED_MBPS_L4 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L4 = 88 + // FI_DEV_NVLINK_SPEED_MBPS_L5 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L5 = 89 + // FI_DEV_NVLINK_SPEED_MBPS_COMMON as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_COMMON = 90 + // FI_DEV_NVLINK_LINK_COUNT as defined in nvml/nvml.h + FI_DEV_NVLINK_LINK_COUNT = 91 + // FI_DEV_RETIRED_PENDING_SBE as defined in nvml/nvml.h + FI_DEV_RETIRED_PENDING_SBE = 92 + // FI_DEV_RETIRED_PENDING_DBE as defined in nvml/nvml.h + FI_DEV_RETIRED_PENDING_DBE = 93 + // FI_DEV_PCIE_REPLAY_COUNTER as defined in nvml/nvml.h + FI_DEV_PCIE_REPLAY_COUNTER = 94 + // FI_DEV_PCIE_REPLAY_ROLLOVER_COUNTER as defined in nvml/nvml.h + FI_DEV_PCIE_REPLAY_ROLLOVER_COUNTER = 95 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L6 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L6 = 96 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L7 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L7 = 97 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L8 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L8 = 98 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L9 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L9 = 99 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L10 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L10 = 100 + // FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L11 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_FLIT_ERROR_COUNT_L11 = 101 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L6 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L6 = 102 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L7 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L7 = 103 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L8 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L8 = 104 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L9 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L9 = 105 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L10 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L10 = 106 + // FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L11 as defined in nvml/nvml.h + FI_DEV_NVLINK_CRC_DATA_ERROR_COUNT_L11 = 107 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L6 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L6 = 108 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L7 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L7 = 109 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L8 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L8 = 110 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L9 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L9 = 111 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L10 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L10 = 112 + // FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L11 as defined in nvml/nvml.h + FI_DEV_NVLINK_REPLAY_ERROR_COUNT_L11 = 113 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L6 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L6 = 114 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L7 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L7 = 115 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L8 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L8 = 116 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L9 as defined in nvml/nvml.h + 
FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L9 = 117 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L10 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L10 = 118 + // FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L11 as defined in nvml/nvml.h + FI_DEV_NVLINK_RECOVERY_ERROR_COUNT_L11 = 119 + // FI_DEV_NVLINK_BANDWIDTH_C0_L6 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L6 = 120 + // FI_DEV_NVLINK_BANDWIDTH_C0_L7 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L7 = 121 + // FI_DEV_NVLINK_BANDWIDTH_C0_L8 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L8 = 122 + // FI_DEV_NVLINK_BANDWIDTH_C0_L9 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L9 = 123 + // FI_DEV_NVLINK_BANDWIDTH_C0_L10 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L10 = 124 + // FI_DEV_NVLINK_BANDWIDTH_C0_L11 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C0_L11 = 125 + // FI_DEV_NVLINK_BANDWIDTH_C1_L6 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L6 = 126 + // FI_DEV_NVLINK_BANDWIDTH_C1_L7 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L7 = 127 + // FI_DEV_NVLINK_BANDWIDTH_C1_L8 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L8 = 128 + // FI_DEV_NVLINK_BANDWIDTH_C1_L9 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L9 = 129 + // FI_DEV_NVLINK_BANDWIDTH_C1_L10 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L10 = 130 + // FI_DEV_NVLINK_BANDWIDTH_C1_L11 as defined in nvml/nvml.h + FI_DEV_NVLINK_BANDWIDTH_C1_L11 = 131 + // FI_DEV_NVLINK_SPEED_MBPS_L6 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L6 = 132 + // FI_DEV_NVLINK_SPEED_MBPS_L7 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L7 = 133 + // FI_DEV_NVLINK_SPEED_MBPS_L8 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L8 = 134 + // FI_DEV_NVLINK_SPEED_MBPS_L9 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L9 = 135 + // FI_DEV_NVLINK_SPEED_MBPS_L10 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L10 = 136 + // FI_DEV_NVLINK_SPEED_MBPS_L11 as defined in nvml/nvml.h + FI_DEV_NVLINK_SPEED_MBPS_L11 = 137 + // FI_DEV_NVLINK_THROUGHPUT_DATA_TX as defined in nvml/nvml.h + FI_DEV_NVLINK_THROUGHPUT_DATA_TX = 138 + // FI_DEV_NVLINK_THROUGHPUT_DATA_RX as defined in nvml/nvml.h + FI_DEV_NVLINK_THROUGHPUT_DATA_RX = 139 + // FI_DEV_NVLINK_THROUGHPUT_RAW_TX as defined in nvml/nvml.h + FI_DEV_NVLINK_THROUGHPUT_RAW_TX = 140 + // FI_DEV_NVLINK_THROUGHPUT_RAW_RX as defined in nvml/nvml.h + FI_DEV_NVLINK_THROUGHPUT_RAW_RX = 141 + // FI_DEV_REMAPPED_COR as defined in nvml/nvml.h + FI_DEV_REMAPPED_COR = 142 + // FI_DEV_REMAPPED_UNC as defined in nvml/nvml.h + FI_DEV_REMAPPED_UNC = 143 + // FI_DEV_REMAPPED_PENDING as defined in nvml/nvml.h + FI_DEV_REMAPPED_PENDING = 144 + // FI_DEV_REMAPPED_FAILURE as defined in nvml/nvml.h + FI_DEV_REMAPPED_FAILURE = 145 + // FI_DEV_NVLINK_REMOTE_NVLINK_ID as defined in nvml/nvml.h + FI_DEV_NVLINK_REMOTE_NVLINK_ID = 146 + // FI_DEV_NVSWITCH_CONNECTED_LINK_COUNT as defined in nvml/nvml.h + FI_DEV_NVSWITCH_CONNECTED_LINK_COUNT = 147 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L0 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L0 = 148 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L1 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L1 = 149 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L2 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L2 = 150 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L3 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L3 = 151 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L4 as defined in nvml/nvml.h + 
FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L4 = 152 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L5 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L5 = 153 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L6 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L6 = 154 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L7 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L7 = 155 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L8 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L8 = 156 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L9 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L9 = 157 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L10 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L10 = 158 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L11 as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L11 = 159 + // FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_TOTAL as defined in nvml/nvml.h + FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_TOTAL = 160 + // FI_DEV_NVLINK_ERROR_DL_REPLAY as defined in nvml/nvml.h + FI_DEV_NVLINK_ERROR_DL_REPLAY = 161 + // FI_DEV_NVLINK_ERROR_DL_RECOVERY as defined in nvml/nvml.h + FI_DEV_NVLINK_ERROR_DL_RECOVERY = 162 + // FI_DEV_NVLINK_ERROR_DL_CRC as defined in nvml/nvml.h + FI_DEV_NVLINK_ERROR_DL_CRC = 163 + // FI_DEV_NVLINK_GET_SPEED as defined in nvml/nvml.h + FI_DEV_NVLINK_GET_SPEED = 164 + // FI_DEV_NVLINK_GET_STATE as defined in nvml/nvml.h + FI_DEV_NVLINK_GET_STATE = 165 + // FI_DEV_NVLINK_GET_VERSION as defined in nvml/nvml.h + FI_DEV_NVLINK_GET_VERSION = 166 + // FI_DEV_NVLINK_GET_POWER_STATE as defined in nvml/nvml.h + FI_DEV_NVLINK_GET_POWER_STATE = 167 + // FI_DEV_NVLINK_GET_POWER_THRESHOLD as defined in nvml/nvml.h + FI_DEV_NVLINK_GET_POWER_THRESHOLD = 168 + // FI_DEV_PCIE_L0_TO_RECOVERY_COUNTER as defined in nvml/nvml.h + FI_DEV_PCIE_L0_TO_RECOVERY_COUNTER = 169 + // FI_MAX as defined in nvml/nvml.h + FI_MAX = 170 + // EventTypeSingleBitEccError as defined in nvml/nvml.h + EventTypeSingleBitEccError = 1 + // EventTypeDoubleBitEccError as defined in nvml/nvml.h + EventTypeDoubleBitEccError = 2 + // EventTypePState as defined in nvml/nvml.h + EventTypePState = 4 + // EventTypeXidCriticalError as defined in nvml/nvml.h + EventTypeXidCriticalError = 8 + // EventTypeClock as defined in nvml/nvml.h + EventTypeClock = 16 + // EventTypePowerSourceChange as defined in nvml/nvml.h + EventTypePowerSourceChange = 128 + // EventMigConfigChange as defined in nvml/nvml.h + EventMigConfigChange = 256 + // EventTypeNone as defined in nvml/nvml.h + EventTypeNone = 0 + // EventTypeAll as defined in nvml/nvml.h + EventTypeAll = 415 + // ClocksThrottleReasonGpuIdle as defined in nvml/nvml.h + ClocksThrottleReasonGpuIdle = 1 + // ClocksThrottleReasonApplicationsClocksSetting as defined in nvml/nvml.h + ClocksThrottleReasonApplicationsClocksSetting = 2 + // ClocksThrottleReasonUserDefinedClocks as defined in nvml/nvml.h + ClocksThrottleReasonUserDefinedClocks = 2 + // ClocksThrottleReasonSwPowerCap as defined in nvml/nvml.h + ClocksThrottleReasonSwPowerCap = 4 + // ClocksThrottleReasonHwSlowdown as defined in nvml/nvml.h + ClocksThrottleReasonHwSlowdown = 8 + // ClocksThrottleReasonSyncBoost as defined in nvml/nvml.h + ClocksThrottleReasonSyncBoost = 16 + // ClocksThrottleReasonSwThermalSlowdown as defined in nvml/nvml.h + ClocksThrottleReasonSwThermalSlowdown = 32 + // ClocksThrottleReasonHwThermalSlowdown as defined in nvml/nvml.h + ClocksThrottleReasonHwThermalSlowdown = 64 + // ClocksThrottleReasonHwPowerBrakeSlowdown as defined in nvml/nvml.h + 
ClocksThrottleReasonHwPowerBrakeSlowdown = 128 + // ClocksThrottleReasonDisplayClockSetting as defined in nvml/nvml.h + ClocksThrottleReasonDisplayClockSetting = 256 + // ClocksThrottleReasonNone as defined in nvml/nvml.h + ClocksThrottleReasonNone = 0 + // ClocksThrottleReasonAll as defined in nvml/nvml.h + ClocksThrottleReasonAll = 511 + // NVFBC_SESSION_FLAG_DIFFMAP_ENABLED as defined in nvml/nvml.h + NVFBC_SESSION_FLAG_DIFFMAP_ENABLED = 1 + // NVFBC_SESSION_FLAG_CLASSIFICATIONMAP_ENABLED as defined in nvml/nvml.h + NVFBC_SESSION_FLAG_CLASSIFICATIONMAP_ENABLED = 2 + // NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_NO_WAIT as defined in nvml/nvml.h + NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_NO_WAIT = 4 + // NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_INFINITE as defined in nvml/nvml.h + NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_INFINITE = 8 + // NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_TIMEOUT as defined in nvml/nvml.h + NVFBC_SESSION_FLAG_CAPTURE_WITH_WAIT_TIMEOUT = 16 + // GPU_FABRIC_UUID_LEN as defined in nvml/nvml.h + GPU_FABRIC_UUID_LEN = 16 + // GPU_FABRIC_STATE_NOT_SUPPORTED as defined in nvml/nvml.h + GPU_FABRIC_STATE_NOT_SUPPORTED = 0 + // GPU_FABRIC_STATE_NOT_STARTED as defined in nvml/nvml.h + GPU_FABRIC_STATE_NOT_STARTED = 1 + // GPU_FABRIC_STATE_IN_PROGRESS as defined in nvml/nvml.h + GPU_FABRIC_STATE_IN_PROGRESS = 2 + // GPU_FABRIC_STATE_COMPLETED as defined in nvml/nvml.h + GPU_FABRIC_STATE_COMPLETED = 3 + // INIT_FLAG_NO_GPUS as defined in nvml/nvml.h + INIT_FLAG_NO_GPUS = 1 + // INIT_FLAG_NO_ATTACH as defined in nvml/nvml.h + INIT_FLAG_NO_ATTACH = 2 + // DEVICE_INFOROM_VERSION_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_INFOROM_VERSION_BUFFER_SIZE = 16 + // DEVICE_UUID_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_UUID_BUFFER_SIZE = 80 + // DEVICE_UUID_V2_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_UUID_V2_BUFFER_SIZE = 96 + // DEVICE_PART_NUMBER_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_PART_NUMBER_BUFFER_SIZE = 80 + // SYSTEM_DRIVER_VERSION_BUFFER_SIZE as defined in nvml/nvml.h + SYSTEM_DRIVER_VERSION_BUFFER_SIZE = 80 + // SYSTEM_NVML_VERSION_BUFFER_SIZE as defined in nvml/nvml.h + SYSTEM_NVML_VERSION_BUFFER_SIZE = 80 + // DEVICE_NAME_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_NAME_BUFFER_SIZE = 64 + // DEVICE_NAME_V2_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_NAME_V2_BUFFER_SIZE = 96 + // DEVICE_SERIAL_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_SERIAL_BUFFER_SIZE = 30 + // DEVICE_VBIOS_VERSION_BUFFER_SIZE as defined in nvml/nvml.h + DEVICE_VBIOS_VERSION_BUFFER_SIZE = 32 + // AFFINITY_SCOPE_NODE as defined in nvml/nvml.h + AFFINITY_SCOPE_NODE = 0 + // AFFINITY_SCOPE_SOCKET as defined in nvml/nvml.h + AFFINITY_SCOPE_SOCKET = 1 + // DEVICE_MIG_DISABLE as defined in nvml/nvml.h + DEVICE_MIG_DISABLE = 0 + // DEVICE_MIG_ENABLE as defined in nvml/nvml.h + DEVICE_MIG_ENABLE = 1 + // GPU_INSTANCE_PROFILE_1_SLICE as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_1_SLICE = 0 + // GPU_INSTANCE_PROFILE_2_SLICE as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_2_SLICE = 1 + // GPU_INSTANCE_PROFILE_3_SLICE as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_3_SLICE = 2 + // GPU_INSTANCE_PROFILE_4_SLICE as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_4_SLICE = 3 + // GPU_INSTANCE_PROFILE_7_SLICE as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_7_SLICE = 4 + // GPU_INSTANCE_PROFILE_8_SLICE as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_8_SLICE = 5 + // GPU_INSTANCE_PROFILE_6_SLICE as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_6_SLICE = 6 + // GPU_INSTANCE_PROFILE_1_SLICE_REV1 as defined 
in nvml/nvml.h + GPU_INSTANCE_PROFILE_1_SLICE_REV1 = 7 + // GPU_INSTANCE_PROFILE_2_SLICE_REV1 as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_2_SLICE_REV1 = 8 + // GPU_INSTANCE_PROFILE_1_SLICE_REV2 as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_1_SLICE_REV2 = 9 + // GPU_INSTANCE_PROFILE_COUNT as defined in nvml/nvml.h + GPU_INSTANCE_PROFILE_COUNT = 10 + // COMPUTE_INSTANCE_PROFILE_1_SLICE as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_1_SLICE = 0 + // COMPUTE_INSTANCE_PROFILE_2_SLICE as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_2_SLICE = 1 + // COMPUTE_INSTANCE_PROFILE_3_SLICE as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_3_SLICE = 2 + // COMPUTE_INSTANCE_PROFILE_4_SLICE as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_4_SLICE = 3 + // COMPUTE_INSTANCE_PROFILE_7_SLICE as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_7_SLICE = 4 + // COMPUTE_INSTANCE_PROFILE_8_SLICE as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_8_SLICE = 5 + // COMPUTE_INSTANCE_PROFILE_6_SLICE as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_6_SLICE = 6 + // COMPUTE_INSTANCE_PROFILE_1_SLICE_REV1 as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_1_SLICE_REV1 = 7 + // COMPUTE_INSTANCE_PROFILE_COUNT as defined in nvml/nvml.h + COMPUTE_INSTANCE_PROFILE_COUNT = 8 + // COMPUTE_INSTANCE_ENGINE_PROFILE_SHARED as defined in nvml/nvml.h + COMPUTE_INSTANCE_ENGINE_PROFILE_SHARED = 0 + // COMPUTE_INSTANCE_ENGINE_PROFILE_COUNT as defined in nvml/nvml.h + COMPUTE_INSTANCE_ENGINE_PROFILE_COUNT = 1 + // GPM_METRICS_GET_VERSION as defined in nvml/nvml.h + GPM_METRICS_GET_VERSION = 1 + // GPM_SUPPORT_VERSION as defined in nvml/nvml.h + GPM_SUPPORT_VERSION = 1 + // COUNTER_COLLECTION_UNIT_STREAM_STATE_DISABLE as defined in nvml/nvml.h + COUNTER_COLLECTION_UNIT_STREAM_STATE_DISABLE = 0 + // COUNTER_COLLECTION_UNIT_STREAM_STATE_ENABLE as defined in nvml/nvml.h + COUNTER_COLLECTION_UNIT_STREAM_STATE_ENABLE = 1 + // NVLINK_POWER_STATE_HIGH_SPEED as defined in nvml/nvml.h + NVLINK_POWER_STATE_HIGH_SPEED = 0 + // NVLINK_POWER_STATE_LOW as defined in nvml/nvml.h + NVLINK_POWER_STATE_LOW = 1 + // NVLINK_LOW_POWER_THRESHOLD_MIN as defined in nvml/nvml.h + NVLINK_LOW_POWER_THRESHOLD_MIN = 1 + // NVLINK_LOW_POWER_THRESHOLD_MAX as defined in nvml/nvml.h + NVLINK_LOW_POWER_THRESHOLD_MAX = 8191 + // NVLINK_LOW_POWER_THRESHOLD_RESET as defined in nvml/nvml.h + NVLINK_LOW_POWER_THRESHOLD_RESET = 4294967295 +) + +// BridgeChipType as declared in nvml/nvml.h +type BridgeChipType int32 + +// BridgeChipType enumeration from nvml/nvml.h +const ( + BRIDGE_CHIP_PLX BridgeChipType = iota + BRIDGE_CHIP_BRO4 BridgeChipType = 1 +) + +// NvLinkUtilizationCountUnits as declared in nvml/nvml.h +type NvLinkUtilizationCountUnits int32 + +// NvLinkUtilizationCountUnits enumeration from nvml/nvml.h +const ( + NVLINK_COUNTER_UNIT_CYCLES NvLinkUtilizationCountUnits = iota + NVLINK_COUNTER_UNIT_PACKETS NvLinkUtilizationCountUnits = 1 + NVLINK_COUNTER_UNIT_BYTES NvLinkUtilizationCountUnits = 2 + NVLINK_COUNTER_UNIT_RESERVED NvLinkUtilizationCountUnits = 3 + NVLINK_COUNTER_UNIT_COUNT NvLinkUtilizationCountUnits = 4 +) + +// NvLinkUtilizationCountPktTypes as declared in nvml/nvml.h +type NvLinkUtilizationCountPktTypes int32 + +// NvLinkUtilizationCountPktTypes enumeration from nvml/nvml.h +const ( + NVLINK_COUNTER_PKTFILTER_NOP NvLinkUtilizationCountPktTypes = 1 + NVLINK_COUNTER_PKTFILTER_READ NvLinkUtilizationCountPktTypes = 2 + NVLINK_COUNTER_PKTFILTER_WRITE NvLinkUtilizationCountPktTypes = 4 + NVLINK_COUNTER_PKTFILTER_RATOM 
NvLinkUtilizationCountPktTypes = 8 + NVLINK_COUNTER_PKTFILTER_NRATOM NvLinkUtilizationCountPktTypes = 16 + NVLINK_COUNTER_PKTFILTER_FLUSH NvLinkUtilizationCountPktTypes = 32 + NVLINK_COUNTER_PKTFILTER_RESPDATA NvLinkUtilizationCountPktTypes = 64 + NVLINK_COUNTER_PKTFILTER_RESPNODATA NvLinkUtilizationCountPktTypes = 128 + NVLINK_COUNTER_PKTFILTER_ALL NvLinkUtilizationCountPktTypes = 255 +) + +// NvLinkCapability as declared in nvml/nvml.h +type NvLinkCapability int32 + +// NvLinkCapability enumeration from nvml/nvml.h +const ( + NVLINK_CAP_P2P_SUPPORTED NvLinkCapability = iota + NVLINK_CAP_SYSMEM_ACCESS NvLinkCapability = 1 + NVLINK_CAP_P2P_ATOMICS NvLinkCapability = 2 + NVLINK_CAP_SYSMEM_ATOMICS NvLinkCapability = 3 + NVLINK_CAP_SLI_BRIDGE NvLinkCapability = 4 + NVLINK_CAP_VALID NvLinkCapability = 5 + NVLINK_CAP_COUNT NvLinkCapability = 6 +) + +// NvLinkErrorCounter as declared in nvml/nvml.h +type NvLinkErrorCounter int32 + +// NvLinkErrorCounter enumeration from nvml/nvml.h +const ( + NVLINK_ERROR_DL_REPLAY NvLinkErrorCounter = iota + NVLINK_ERROR_DL_RECOVERY NvLinkErrorCounter = 1 + NVLINK_ERROR_DL_CRC_FLIT NvLinkErrorCounter = 2 + NVLINK_ERROR_DL_CRC_DATA NvLinkErrorCounter = 3 + NVLINK_ERROR_DL_ECC_DATA NvLinkErrorCounter = 4 + NVLINK_ERROR_COUNT NvLinkErrorCounter = 5 +) + +// IntNvLinkDeviceType as declared in nvml/nvml.h +type IntNvLinkDeviceType int32 + +// IntNvLinkDeviceType enumeration from nvml/nvml.h +const ( + NVLINK_DEVICE_TYPE_GPU IntNvLinkDeviceType = iota + NVLINK_DEVICE_TYPE_IBMNPU IntNvLinkDeviceType = 1 + NVLINK_DEVICE_TYPE_SWITCH IntNvLinkDeviceType = 2 + NVLINK_DEVICE_TYPE_UNKNOWN IntNvLinkDeviceType = 255 +) + +// GpuTopologyLevel as declared in nvml/nvml.h +type GpuTopologyLevel int32 + +// GpuTopologyLevel enumeration from nvml/nvml.h +const ( + TOPOLOGY_INTERNAL GpuTopologyLevel = iota + TOPOLOGY_SINGLE GpuTopologyLevel = 10 + TOPOLOGY_MULTIPLE GpuTopologyLevel = 20 + TOPOLOGY_HOSTBRIDGE GpuTopologyLevel = 30 + TOPOLOGY_NODE GpuTopologyLevel = 40 + TOPOLOGY_SYSTEM GpuTopologyLevel = 50 +) + +// GpuP2PStatus as declared in nvml/nvml.h +type GpuP2PStatus int32 + +// GpuP2PStatus enumeration from nvml/nvml.h +const ( + P2P_STATUS_OK GpuP2PStatus = iota + P2P_STATUS_CHIPSET_NOT_SUPPORED GpuP2PStatus = 1 + P2P_STATUS_GPU_NOT_SUPPORTED GpuP2PStatus = 2 + P2P_STATUS_IOH_TOPOLOGY_NOT_SUPPORTED GpuP2PStatus = 3 + P2P_STATUS_DISABLED_BY_REGKEY GpuP2PStatus = 4 + P2P_STATUS_NOT_SUPPORTED GpuP2PStatus = 5 + P2P_STATUS_UNKNOWN GpuP2PStatus = 6 +) + +// GpuP2PCapsIndex as declared in nvml/nvml.h +type GpuP2PCapsIndex int32 + +// GpuP2PCapsIndex enumeration from nvml/nvml.h +const ( + P2P_CAPS_INDEX_READ GpuP2PCapsIndex = iota + P2P_CAPS_INDEX_WRITE GpuP2PCapsIndex = 1 + P2P_CAPS_INDEX_NVLINK GpuP2PCapsIndex = 2 + P2P_CAPS_INDEX_ATOMICS GpuP2PCapsIndex = 3 + P2P_CAPS_INDEX_PROP GpuP2PCapsIndex = 4 + P2P_CAPS_INDEX_UNKNOWN GpuP2PCapsIndex = 5 +) + +// SamplingType as declared in nvml/nvml.h +type SamplingType int32 + +// SamplingType enumeration from nvml/nvml.h +const ( + TOTAL_POWER_SAMPLES SamplingType = iota + GPU_UTILIZATION_SAMPLES SamplingType = 1 + MEMORY_UTILIZATION_SAMPLES SamplingType = 2 + ENC_UTILIZATION_SAMPLES SamplingType = 3 + DEC_UTILIZATION_SAMPLES SamplingType = 4 + PROCESSOR_CLK_SAMPLES SamplingType = 5 + MEMORY_CLK_SAMPLES SamplingType = 6 + SAMPLINGTYPE_COUNT SamplingType = 7 +) + +// PcieUtilCounter as declared in nvml/nvml.h +type PcieUtilCounter int32 + +// PcieUtilCounter enumeration from nvml/nvml.h +const ( + PCIE_UTIL_TX_BYTES PcieUtilCounter = 
iota + PCIE_UTIL_RX_BYTES PcieUtilCounter = 1 + PCIE_UTIL_COUNT PcieUtilCounter = 2 +) + +// ValueType as declared in nvml/nvml.h +type ValueType int32 + +// ValueType enumeration from nvml/nvml.h +const ( + VALUE_TYPE_DOUBLE ValueType = iota + VALUE_TYPE_UNSIGNED_INT ValueType = 1 + VALUE_TYPE_UNSIGNED_LONG ValueType = 2 + VALUE_TYPE_UNSIGNED_LONG_LONG ValueType = 3 + VALUE_TYPE_SIGNED_LONG_LONG ValueType = 4 + VALUE_TYPE_COUNT ValueType = 5 +) + +// PerfPolicyType as declared in nvml/nvml.h +type PerfPolicyType int32 + +// PerfPolicyType enumeration from nvml/nvml.h +const ( + PERF_POLICY_POWER PerfPolicyType = iota + PERF_POLICY_THERMAL PerfPolicyType = 1 + PERF_POLICY_SYNC_BOOST PerfPolicyType = 2 + PERF_POLICY_BOARD_LIMIT PerfPolicyType = 3 + PERF_POLICY_LOW_UTILIZATION PerfPolicyType = 4 + PERF_POLICY_RELIABILITY PerfPolicyType = 5 + PERF_POLICY_TOTAL_APP_CLOCKS PerfPolicyType = 10 + PERF_POLICY_TOTAL_BASE_CLOCKS PerfPolicyType = 11 + PERF_POLICY_COUNT PerfPolicyType = 12 +) + +// EnableState as declared in nvml/nvml.h +type EnableState int32 + +// EnableState enumeration from nvml/nvml.h +const ( + FEATURE_DISABLED EnableState = iota + FEATURE_ENABLED EnableState = 1 +) + +// BrandType as declared in nvml/nvml.h +type BrandType int32 + +// BrandType enumeration from nvml/nvml.h +const ( + BRAND_UNKNOWN BrandType = iota + BRAND_QUADRO BrandType = 1 + BRAND_TESLA BrandType = 2 + BRAND_NVS BrandType = 3 + BRAND_GRID BrandType = 4 + BRAND_GEFORCE BrandType = 5 + BRAND_TITAN BrandType = 6 + BRAND_NVIDIA_VAPPS BrandType = 7 + BRAND_NVIDIA_VPC BrandType = 8 + BRAND_NVIDIA_VCS BrandType = 9 + BRAND_NVIDIA_VWS BrandType = 10 + BRAND_NVIDIA_CLOUD_GAMING BrandType = 11 + BRAND_NVIDIA_VGAMING BrandType = 11 + BRAND_QUADRO_RTX BrandType = 12 + BRAND_NVIDIA_RTX BrandType = 13 + BRAND_NVIDIA BrandType = 14 + BRAND_GEFORCE_RTX BrandType = 15 + BRAND_TITAN_RTX BrandType = 16 + BRAND_COUNT BrandType = 17 +) + +// TemperatureThresholds as declared in nvml/nvml.h +type TemperatureThresholds int32 + +// TemperatureThresholds enumeration from nvml/nvml.h +const ( + TEMPERATURE_THRESHOLD_SHUTDOWN TemperatureThresholds = iota + TEMPERATURE_THRESHOLD_SLOWDOWN TemperatureThresholds = 1 + TEMPERATURE_THRESHOLD_MEM_MAX TemperatureThresholds = 2 + TEMPERATURE_THRESHOLD_GPU_MAX TemperatureThresholds = 3 + TEMPERATURE_THRESHOLD_ACOUSTIC_MIN TemperatureThresholds = 4 + TEMPERATURE_THRESHOLD_ACOUSTIC_CURR TemperatureThresholds = 5 + TEMPERATURE_THRESHOLD_ACOUSTIC_MAX TemperatureThresholds = 6 + TEMPERATURE_THRESHOLD_COUNT TemperatureThresholds = 7 +) + +// TemperatureSensors as declared in nvml/nvml.h +type TemperatureSensors int32 + +// TemperatureSensors enumeration from nvml/nvml.h +const ( + TEMPERATURE_GPU TemperatureSensors = iota + TEMPERATURE_COUNT TemperatureSensors = 1 +) + +// ComputeMode as declared in nvml/nvml.h +type ComputeMode int32 + +// ComputeMode enumeration from nvml/nvml.h +const ( + COMPUTEMODE_DEFAULT ComputeMode = iota + COMPUTEMODE_EXCLUSIVE_THREAD ComputeMode = 1 + COMPUTEMODE_PROHIBITED ComputeMode = 2 + COMPUTEMODE_EXCLUSIVE_PROCESS ComputeMode = 3 + COMPUTEMODE_COUNT ComputeMode = 4 +) + +// MemoryErrorType as declared in nvml/nvml.h +type MemoryErrorType int32 + +// MemoryErrorType enumeration from nvml/nvml.h +const ( + MEMORY_ERROR_TYPE_CORRECTED MemoryErrorType = iota + MEMORY_ERROR_TYPE_UNCORRECTED MemoryErrorType = 1 + MEMORY_ERROR_TYPE_COUNT MemoryErrorType = 2 +) + +// EccCounterType as declared in nvml/nvml.h +type EccCounterType int32 + +// EccCounterType enumeration from 
nvml/nvml.h +const ( + VOLATILE_ECC EccCounterType = iota + AGGREGATE_ECC EccCounterType = 1 + ECC_COUNTER_TYPE_COUNT EccCounterType = 2 +) + +// ClockType as declared in nvml/nvml.h +type ClockType int32 + +// ClockType enumeration from nvml/nvml.h +const ( + CLOCK_GRAPHICS ClockType = iota + CLOCK_SM ClockType = 1 + CLOCK_MEM ClockType = 2 + CLOCK_VIDEO ClockType = 3 + CLOCK_COUNT ClockType = 4 +) + +// ClockId as declared in nvml/nvml.h +type ClockId int32 + +// ClockId enumeration from nvml/nvml.h +const ( + CLOCK_ID_CURRENT ClockId = iota + CLOCK_ID_APP_CLOCK_TARGET ClockId = 1 + CLOCK_ID_APP_CLOCK_DEFAULT ClockId = 2 + CLOCK_ID_CUSTOMER_BOOST_MAX ClockId = 3 + CLOCK_ID_COUNT ClockId = 4 +) + +// DriverModel as declared in nvml/nvml.h +type DriverModel int32 + +// DriverModel enumeration from nvml/nvml.h +const ( + DRIVER_WDDM DriverModel = iota + DRIVER_WDM DriverModel = 1 +) + +// Pstates as declared in nvml/nvml.h +type Pstates int32 + +// Pstates enumeration from nvml/nvml.h +const ( + PSTATE_0 Pstates = iota + PSTATE_1 Pstates = 1 + PSTATE_2 Pstates = 2 + PSTATE_3 Pstates = 3 + PSTATE_4 Pstates = 4 + PSTATE_5 Pstates = 5 + PSTATE_6 Pstates = 6 + PSTATE_7 Pstates = 7 + PSTATE_8 Pstates = 8 + PSTATE_9 Pstates = 9 + PSTATE_10 Pstates = 10 + PSTATE_11 Pstates = 11 + PSTATE_12 Pstates = 12 + PSTATE_13 Pstates = 13 + PSTATE_14 Pstates = 14 + PSTATE_15 Pstates = 15 + PSTATE_UNKNOWN Pstates = 32 +) + +// GpuOperationMode as declared in nvml/nvml.h +type GpuOperationMode int32 + +// GpuOperationMode enumeration from nvml/nvml.h +const ( + GOM_ALL_ON GpuOperationMode = iota + GOM_COMPUTE GpuOperationMode = 1 + GOM_LOW_DP GpuOperationMode = 2 +) + +// InforomObject as declared in nvml/nvml.h +type InforomObject int32 + +// InforomObject enumeration from nvml/nvml.h +const ( + INFOROM_OEM InforomObject = iota + INFOROM_ECC InforomObject = 1 + INFOROM_POWER InforomObject = 2 + INFOROM_COUNT InforomObject = 3 +) + +// Return as declared in nvml/nvml.h +type Return int32 + +// Return enumeration from nvml/nvml.h +const ( + SUCCESS Return = iota + ERROR_UNINITIALIZED Return = 1 + ERROR_INVALID_ARGUMENT Return = 2 + ERROR_NOT_SUPPORTED Return = 3 + ERROR_NO_PERMISSION Return = 4 + ERROR_ALREADY_INITIALIZED Return = 5 + ERROR_NOT_FOUND Return = 6 + ERROR_INSUFFICIENT_SIZE Return = 7 + ERROR_INSUFFICIENT_POWER Return = 8 + ERROR_DRIVER_NOT_LOADED Return = 9 + ERROR_TIMEOUT Return = 10 + ERROR_IRQ_ISSUE Return = 11 + ERROR_LIBRARY_NOT_FOUND Return = 12 + ERROR_FUNCTION_NOT_FOUND Return = 13 + ERROR_CORRUPTED_INFOROM Return = 14 + ERROR_GPU_IS_LOST Return = 15 + ERROR_RESET_REQUIRED Return = 16 + ERROR_OPERATING_SYSTEM Return = 17 + ERROR_LIB_RM_VERSION_MISMATCH Return = 18 + ERROR_IN_USE Return = 19 + ERROR_MEMORY Return = 20 + ERROR_NO_DATA Return = 21 + ERROR_VGPU_ECC_NOT_SUPPORTED Return = 22 + ERROR_INSUFFICIENT_RESOURCES Return = 23 + ERROR_FREQ_NOT_SUPPORTED Return = 24 + ERROR_ARGUMENT_VERSION_MISMATCH Return = 25 + ERROR_DEPRECATED Return = 26 + ERROR_UNKNOWN Return = 999 +) + +// MemoryLocation as declared in nvml/nvml.h +type MemoryLocation int32 + +// MemoryLocation enumeration from nvml/nvml.h +const ( + MEMORY_LOCATION_L1_CACHE MemoryLocation = iota + MEMORY_LOCATION_L2_CACHE MemoryLocation = 1 + MEMORY_LOCATION_DRAM MemoryLocation = 2 + MEMORY_LOCATION_DEVICE_MEMORY MemoryLocation = 2 + MEMORY_LOCATION_REGISTER_FILE MemoryLocation = 3 + MEMORY_LOCATION_TEXTURE_MEMORY MemoryLocation = 4 + MEMORY_LOCATION_TEXTURE_SHM MemoryLocation = 5 + MEMORY_LOCATION_CBU MemoryLocation = 6 + 
MEMORY_LOCATION_SRAM MemoryLocation = 7 + MEMORY_LOCATION_COUNT MemoryLocation = 8 +) + +// PageRetirementCause as declared in nvml/nvml.h +type PageRetirementCause int32 + +// PageRetirementCause enumeration from nvml/nvml.h +const ( + PAGE_RETIREMENT_CAUSE_MULTIPLE_SINGLE_BIT_ECC_ERRORS PageRetirementCause = iota + PAGE_RETIREMENT_CAUSE_DOUBLE_BIT_ECC_ERROR PageRetirementCause = 1 + PAGE_RETIREMENT_CAUSE_COUNT PageRetirementCause = 2 +) + +// RestrictedAPI as declared in nvml/nvml.h +type RestrictedAPI int32 + +// RestrictedAPI enumeration from nvml/nvml.h +const ( + RESTRICTED_API_SET_APPLICATION_CLOCKS RestrictedAPI = iota + RESTRICTED_API_SET_AUTO_BOOSTED_CLOCKS RestrictedAPI = 1 + RESTRICTED_API_COUNT RestrictedAPI = 2 +) + +// GpuVirtualizationMode as declared in nvml/nvml.h +type GpuVirtualizationMode int32 + +// GpuVirtualizationMode enumeration from nvml/nvml.h +const ( + GPU_VIRTUALIZATION_MODE_NONE GpuVirtualizationMode = iota + GPU_VIRTUALIZATION_MODE_PASSTHROUGH GpuVirtualizationMode = 1 + GPU_VIRTUALIZATION_MODE_VGPU GpuVirtualizationMode = 2 + GPU_VIRTUALIZATION_MODE_HOST_VGPU GpuVirtualizationMode = 3 + GPU_VIRTUALIZATION_MODE_HOST_VSGA GpuVirtualizationMode = 4 +) + +// HostVgpuMode as declared in nvml/nvml.h +type HostVgpuMode int32 + +// HostVgpuMode enumeration from nvml/nvml.h +const ( + HOST_VGPU_MODE_NON_SRIOV HostVgpuMode = iota + HOST_VGPU_MODE_SRIOV HostVgpuMode = 1 +) + +// VgpuVmIdType as declared in nvml/nvml.h +type VgpuVmIdType int32 + +// VgpuVmIdType enumeration from nvml/nvml.h +const ( + VGPU_VM_ID_DOMAIN_ID VgpuVmIdType = iota + VGPU_VM_ID_UUID VgpuVmIdType = 1 +) + +// VgpuGuestInfoState as declared in nvml/nvml.h +type VgpuGuestInfoState int32 + +// VgpuGuestInfoState enumeration from nvml/nvml.h +const ( + VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED VgpuGuestInfoState = iota + VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED VgpuGuestInfoState = 1 +) + +// VgpuCapability as declared in nvml/nvml.h +type VgpuCapability int32 + +// VgpuCapability enumeration from nvml/nvml.h +const ( + VGPU_CAP_NVLINK_P2P VgpuCapability = iota + VGPU_CAP_GPUDIRECT VgpuCapability = 1 + VGPU_CAP_MULTI_VGPU_EXCLUSIVE VgpuCapability = 2 + VGPU_CAP_EXCLUSIVE_TYPE VgpuCapability = 3 + VGPU_CAP_EXCLUSIVE_SIZE VgpuCapability = 4 + VGPU_CAP_COUNT VgpuCapability = 5 +) + +// VgpuDriverCapability as declared in nvml/nvml.h +type VgpuDriverCapability int32 + +// VgpuDriverCapability enumeration from nvml/nvml.h +const ( + VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU VgpuDriverCapability = iota + VGPU_DRIVER_CAP_COUNT VgpuDriverCapability = 1 +) + +// DeviceVgpuCapability as declared in nvml/nvml.h +type DeviceVgpuCapability int32 + +// DeviceVgpuCapability enumeration from nvml/nvml.h +const ( + DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU DeviceVgpuCapability = iota + DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_PROFILES DeviceVgpuCapability = 1 + DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_SIZES DeviceVgpuCapability = 2 + DEVICE_VGPU_CAP_COUNT DeviceVgpuCapability = 3 +) + +// GpuUtilizationDomainId as declared in nvml/nvml.h +type GpuUtilizationDomainId int32 + +// GpuUtilizationDomainId enumeration from nvml/nvml.h +const ( + GPU_UTILIZATION_DOMAIN_GPU GpuUtilizationDomainId = iota + GPU_UTILIZATION_DOMAIN_FB GpuUtilizationDomainId = 1 + GPU_UTILIZATION_DOMAIN_VID GpuUtilizationDomainId = 2 + GPU_UTILIZATION_DOMAIN_BUS GpuUtilizationDomainId = 3 +) + +// FanState as declared in nvml/nvml.h +type FanState int32 + +// FanState enumeration from nvml/nvml.h +const ( + FAN_NORMAL FanState = iota + 
FAN_FAILED FanState = 1 +) + +// LedColor as declared in nvml/nvml.h +type LedColor int32 + +// LedColor enumeration from nvml/nvml.h +const ( + LED_COLOR_GREEN LedColor = iota + LED_COLOR_AMBER LedColor = 1 +) + +// EncoderType as declared in nvml/nvml.h +type EncoderType int32 + +// EncoderType enumeration from nvml/nvml.h +const ( + ENCODER_QUERY_H264 EncoderType = iota + ENCODER_QUERY_HEVC EncoderType = 1 +) + +// FBCSessionType as declared in nvml/nvml.h +type FBCSessionType int32 + +// FBCSessionType enumeration from nvml/nvml.h +const ( + FBC_SESSION_TYPE_UNKNOWN FBCSessionType = iota + FBC_SESSION_TYPE_TOSYS FBCSessionType = 1 + FBC_SESSION_TYPE_CUDA FBCSessionType = 2 + FBC_SESSION_TYPE_VID FBCSessionType = 3 + FBC_SESSION_TYPE_HWENC FBCSessionType = 4 +) + +// DetachGpuState as declared in nvml/nvml.h +type DetachGpuState int32 + +// DetachGpuState enumeration from nvml/nvml.h +const ( + DETACH_GPU_KEEP DetachGpuState = iota + DETACH_GPU_REMOVE DetachGpuState = 1 +) + +// PcieLinkState as declared in nvml/nvml.h +type PcieLinkState int32 + +// PcieLinkState enumeration from nvml/nvml.h +const ( + PCIE_LINK_KEEP PcieLinkState = iota + PCIE_LINK_SHUT_DOWN PcieLinkState = 1 +) + +// ClockLimitId as declared in nvml/nvml.h +type ClockLimitId int32 + +// ClockLimitId enumeration from nvml/nvml.h +const ( + CLOCK_LIMIT_ID_RANGE_START ClockLimitId = -256 + CLOCK_LIMIT_ID_TDP ClockLimitId = -255 + CLOCK_LIMIT_ID_UNLIMITED ClockLimitId = -254 +) + +// VgpuVmCompatibility as declared in nvml/nvml.h +type VgpuVmCompatibility int32 + +// VgpuVmCompatibility enumeration from nvml/nvml.h +const ( + VGPU_VM_COMPATIBILITY_NONE VgpuVmCompatibility = iota + VGPU_VM_COMPATIBILITY_COLD VgpuVmCompatibility = 1 + VGPU_VM_COMPATIBILITY_HIBERNATE VgpuVmCompatibility = 2 + VGPU_VM_COMPATIBILITY_SLEEP VgpuVmCompatibility = 4 + VGPU_VM_COMPATIBILITY_LIVE VgpuVmCompatibility = 8 +) + +// VgpuPgpuCompatibilityLimitCode as declared in nvml/nvml.h +type VgpuPgpuCompatibilityLimitCode int32 + +// VgpuPgpuCompatibilityLimitCode enumeration from nvml/nvml.h +const ( + VGPU_COMPATIBILITY_LIMIT_NONE VgpuPgpuCompatibilityLimitCode = iota + VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER VgpuPgpuCompatibilityLimitCode = 1 + VGPU_COMPATIBILITY_LIMIT_GUEST_DRIVER VgpuPgpuCompatibilityLimitCode = 2 + VGPU_COMPATIBILITY_LIMIT_GPU VgpuPgpuCompatibilityLimitCode = 4 + VGPU_COMPATIBILITY_LIMIT_OTHER VgpuPgpuCompatibilityLimitCode = -2147483648 +) + +// ThermalTarget as declared in nvml/nvml.h +type ThermalTarget int32 + +// ThermalTarget enumeration from nvml/nvml.h +const ( + THERMAL_TARGET_NONE ThermalTarget = iota + THERMAL_TARGET_GPU ThermalTarget = 1 + THERMAL_TARGET_MEMORY ThermalTarget = 2 + THERMAL_TARGET_POWER_SUPPLY ThermalTarget = 4 + THERMAL_TARGET_BOARD ThermalTarget = 8 + THERMAL_TARGET_VCD_BOARD ThermalTarget = 9 + THERMAL_TARGET_VCD_INLET ThermalTarget = 10 + THERMAL_TARGET_VCD_OUTLET ThermalTarget = 11 + THERMAL_TARGET_ALL ThermalTarget = 15 + THERMAL_TARGET_UNKNOWN ThermalTarget = -1 +) + +// ThermalController as declared in nvml/nvml.h +type ThermalController int32 + +// ThermalController enumeration from nvml/nvml.h +const ( + THERMAL_CONTROLLER_NONE ThermalController = iota + THERMAL_CONTROLLER_GPU_INTERNAL ThermalController = 1 + THERMAL_CONTROLLER_ADM1032 ThermalController = 2 + THERMAL_CONTROLLER_ADT7461 ThermalController = 3 + THERMAL_CONTROLLER_MAX6649 ThermalController = 4 + THERMAL_CONTROLLER_MAX1617 ThermalController = 5 + THERMAL_CONTROLLER_LM99 ThermalController = 6 + THERMAL_CONTROLLER_LM89 
ThermalController = 7 + THERMAL_CONTROLLER_LM64 ThermalController = 8 + THERMAL_CONTROLLER_G781 ThermalController = 9 + THERMAL_CONTROLLER_ADT7473 ThermalController = 10 + THERMAL_CONTROLLER_SBMAX6649 ThermalController = 11 + THERMAL_CONTROLLER_VBIOSEVT ThermalController = 12 + THERMAL_CONTROLLER_OS ThermalController = 13 + THERMAL_CONTROLLER_NVSYSCON_CANOAS ThermalController = 14 + THERMAL_CONTROLLER_NVSYSCON_E551 ThermalController = 15 + THERMAL_CONTROLLER_MAX6649R ThermalController = 16 + THERMAL_CONTROLLER_ADT7473S ThermalController = 17 + THERMAL_CONTROLLER_UNKNOWN ThermalController = -1 +) + +// GridLicenseFeatureCode as declared in nvml/nvml.h +type GridLicenseFeatureCode int32 + +// GridLicenseFeatureCode enumeration from nvml/nvml.h +const ( + GRID_LICENSE_FEATURE_CODE_UNKNOWN GridLicenseFeatureCode = iota + GRID_LICENSE_FEATURE_CODE_VGPU GridLicenseFeatureCode = 1 + GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX GridLicenseFeatureCode = 2 + GRID_LICENSE_FEATURE_CODE_VWORKSTATION GridLicenseFeatureCode = 2 + GRID_LICENSE_FEATURE_CODE_GAMING GridLicenseFeatureCode = 3 + GRID_LICENSE_FEATURE_CODE_COMPUTE GridLicenseFeatureCode = 4 +) + +// GpmMetricId as declared in nvml/nvml.h +type GpmMetricId int32 + +// GpmMetricId enumeration from nvml/nvml.h +const ( + GPM_METRIC_GRAPHICS_UTIL GpmMetricId = 1 + GPM_METRIC_SM_UTIL GpmMetricId = 2 + GPM_METRIC_SM_OCCUPANCY GpmMetricId = 3 + GPM_METRIC_INTEGER_UTIL GpmMetricId = 4 + GPM_METRIC_ANY_TENSOR_UTIL GpmMetricId = 5 + GPM_METRIC_DFMA_TENSOR_UTIL GpmMetricId = 6 + GPM_METRIC_HMMA_TENSOR_UTIL GpmMetricId = 7 + GPM_METRIC_IMMA_TENSOR_UTIL GpmMetricId = 9 + GPM_METRIC_DRAM_BW_UTIL GpmMetricId = 10 + GPM_METRIC_FP64_UTIL GpmMetricId = 11 + GPM_METRIC_FP32_UTIL GpmMetricId = 12 + GPM_METRIC_FP16_UTIL GpmMetricId = 13 + GPM_METRIC_PCIE_TX_PER_SEC GpmMetricId = 20 + GPM_METRIC_PCIE_RX_PER_SEC GpmMetricId = 21 + GPM_METRIC_NVDEC_0_UTIL GpmMetricId = 30 + GPM_METRIC_NVDEC_1_UTIL GpmMetricId = 31 + GPM_METRIC_NVDEC_2_UTIL GpmMetricId = 32 + GPM_METRIC_NVDEC_3_UTIL GpmMetricId = 33 + GPM_METRIC_NVDEC_4_UTIL GpmMetricId = 34 + GPM_METRIC_NVDEC_5_UTIL GpmMetricId = 35 + GPM_METRIC_NVDEC_6_UTIL GpmMetricId = 36 + GPM_METRIC_NVDEC_7_UTIL GpmMetricId = 37 + GPM_METRIC_NVJPG_0_UTIL GpmMetricId = 40 + GPM_METRIC_NVJPG_1_UTIL GpmMetricId = 41 + GPM_METRIC_NVJPG_2_UTIL GpmMetricId = 42 + GPM_METRIC_NVJPG_3_UTIL GpmMetricId = 43 + GPM_METRIC_NVJPG_4_UTIL GpmMetricId = 44 + GPM_METRIC_NVJPG_5_UTIL GpmMetricId = 45 + GPM_METRIC_NVJPG_6_UTIL GpmMetricId = 46 + GPM_METRIC_NVJPG_7_UTIL GpmMetricId = 47 + GPM_METRIC_NVOFA_0_UTIL GpmMetricId = 50 + GPM_METRIC_NVLINK_TOTAL_RX_PER_SEC GpmMetricId = 60 + GPM_METRIC_NVLINK_TOTAL_TX_PER_SEC GpmMetricId = 61 + GPM_METRIC_NVLINK_L0_RX_PER_SEC GpmMetricId = 62 + GPM_METRIC_NVLINK_L0_TX_PER_SEC GpmMetricId = 63 + GPM_METRIC_NVLINK_L1_RX_PER_SEC GpmMetricId = 64 + GPM_METRIC_NVLINK_L1_TX_PER_SEC GpmMetricId = 65 + GPM_METRIC_NVLINK_L2_RX_PER_SEC GpmMetricId = 66 + GPM_METRIC_NVLINK_L2_TX_PER_SEC GpmMetricId = 67 + GPM_METRIC_NVLINK_L3_RX_PER_SEC GpmMetricId = 68 + GPM_METRIC_NVLINK_L3_TX_PER_SEC GpmMetricId = 69 + GPM_METRIC_NVLINK_L4_RX_PER_SEC GpmMetricId = 70 + GPM_METRIC_NVLINK_L4_TX_PER_SEC GpmMetricId = 71 + GPM_METRIC_NVLINK_L5_RX_PER_SEC GpmMetricId = 72 + GPM_METRIC_NVLINK_L5_TX_PER_SEC GpmMetricId = 73 + GPM_METRIC_NVLINK_L6_RX_PER_SEC GpmMetricId = 74 + GPM_METRIC_NVLINK_L6_TX_PER_SEC GpmMetricId = 75 + GPM_METRIC_NVLINK_L7_RX_PER_SEC GpmMetricId = 76 + GPM_METRIC_NVLINK_L7_TX_PER_SEC GpmMetricId = 77 + 
GPM_METRIC_NVLINK_L8_RX_PER_SEC GpmMetricId = 78 + GPM_METRIC_NVLINK_L8_TX_PER_SEC GpmMetricId = 79 + GPM_METRIC_NVLINK_L9_RX_PER_SEC GpmMetricId = 80 + GPM_METRIC_NVLINK_L9_TX_PER_SEC GpmMetricId = 81 + GPM_METRIC_NVLINK_L10_RX_PER_SEC GpmMetricId = 82 + GPM_METRIC_NVLINK_L10_TX_PER_SEC GpmMetricId = 83 + GPM_METRIC_NVLINK_L11_RX_PER_SEC GpmMetricId = 84 + GPM_METRIC_NVLINK_L11_TX_PER_SEC GpmMetricId = 85 + GPM_METRIC_NVLINK_L12_RX_PER_SEC GpmMetricId = 86 + GPM_METRIC_NVLINK_L12_TX_PER_SEC GpmMetricId = 87 + GPM_METRIC_NVLINK_L13_RX_PER_SEC GpmMetricId = 88 + GPM_METRIC_NVLINK_L13_TX_PER_SEC GpmMetricId = 89 + GPM_METRIC_NVLINK_L14_RX_PER_SEC GpmMetricId = 90 + GPM_METRIC_NVLINK_L14_TX_PER_SEC GpmMetricId = 91 + GPM_METRIC_NVLINK_L15_RX_PER_SEC GpmMetricId = 92 + GPM_METRIC_NVLINK_L15_TX_PER_SEC GpmMetricId = 93 + GPM_METRIC_NVLINK_L16_RX_PER_SEC GpmMetricId = 94 + GPM_METRIC_NVLINK_L16_TX_PER_SEC GpmMetricId = 95 + GPM_METRIC_NVLINK_L17_RX_PER_SEC GpmMetricId = 96 + GPM_METRIC_NVLINK_L17_TX_PER_SEC GpmMetricId = 97 + GPM_METRIC_MAX GpmMetricId = 98 +) diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/const_gen.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/const_gen.go new file mode 100644 index 0000000..9038b31 --- /dev/null +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/const_gen.go @@ -0,0 +1,27 @@ +// Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nvml + +import ( + "reflect" +) + +const ( + SYSTEM_PROCESS_NAME_BUFFER_SIZE = 256 +) + +func STRUCT_VERSION(data interface{}, version uint32) uint32 { + return uint32(uint32(reflect.Indirect(reflect.ValueOf(data)).Type().Size()) | (version << uint32(24))) +} diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/device.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/device.go new file mode 100644 index 0000000..91a7baa --- /dev/null +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/device.go @@ -0,0 +1,2652 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package nvml
+
+import (
+	"unsafe"
+)
+
+// EccBitType
+type EccBitType = MemoryErrorType
+
+// nvml.DeviceGetCount()
+func DeviceGetCount() (int, Return) {
+	var DeviceCount uint32
+	ret := nvmlDeviceGetCount(&DeviceCount)
+	return int(DeviceCount), ret
+}
+
+// nvml.DeviceGetHandleByIndex()
+func DeviceGetHandleByIndex(Index int) (Device, Return) {
+	var Device Device
+	ret := nvmlDeviceGetHandleByIndex(uint32(Index), &Device)
+	return Device, ret
+}
+
+// nvml.DeviceGetHandleBySerial()
+func DeviceGetHandleBySerial(Serial string) (Device, Return) {
+	var Device Device
+	ret := nvmlDeviceGetHandleBySerial(Serial+string(rune(0)), &Device)
+	return Device, ret
+}
+
+// nvml.DeviceGetHandleByUUID()
+func DeviceGetHandleByUUID(Uuid string) (Device, Return) {
+	var Device Device
+	ret := nvmlDeviceGetHandleByUUID(Uuid+string(rune(0)), &Device)
+	return Device, ret
+}
+
+// nvml.DeviceGetHandleByPciBusId()
+func DeviceGetHandleByPciBusId(PciBusId string) (Device, Return) {
+	var Device Device
+	ret := nvmlDeviceGetHandleByPciBusId(PciBusId+string(rune(0)), &Device)
+	return Device, ret
+}
+
+// nvml.DeviceGetName()
+func DeviceGetName(Device Device) (string, Return) {
+	Name := make([]byte, DEVICE_NAME_V2_BUFFER_SIZE)
+	ret := nvmlDeviceGetName(Device, &Name[0], DEVICE_NAME_V2_BUFFER_SIZE)
+	return string(Name[:clen(Name)]), ret
+}
+
+func (Device Device) GetName() (string, Return) {
+	return DeviceGetName(Device)
+}
+
+// nvml.DeviceGetBrand()
+func DeviceGetBrand(Device Device) (BrandType, Return) {
+	var _type BrandType
+	ret := nvmlDeviceGetBrand(Device, &_type)
+	return _type, ret
+}
+
+func (Device Device) GetBrand() (BrandType, Return) {
+	return DeviceGetBrand(Device)
+}
+
+// nvml.DeviceGetIndex()
+func DeviceGetIndex(Device Device) (int, Return) {
+	var Index uint32
+	ret := nvmlDeviceGetIndex(Device, &Index)
+	return int(Index), ret
+}
+
+func (Device Device) GetIndex() (int, Return) {
+	return DeviceGetIndex(Device)
+}
+
+// nvml.DeviceGetSerial()
+func DeviceGetSerial(Device Device) (string, Return) {
+	Serial := make([]byte, DEVICE_SERIAL_BUFFER_SIZE)
+	ret := nvmlDeviceGetSerial(Device, &Serial[0], DEVICE_SERIAL_BUFFER_SIZE)
+	return string(Serial[:clen(Serial)]), ret
+}
+
+func (Device Device) GetSerial() (string, Return) {
+	return DeviceGetSerial(Device)
+}
+
+// nvml.DeviceGetCpuAffinity()
+func DeviceGetCpuAffinity(Device Device, NumCPUs int) ([]uint, Return) {
+	CpuSetSize := uint32((NumCPUs-1)/int(unsafe.Sizeof(uint(0))) + 1)
+	CpuSet := make([]uint, CpuSetSize)
+	ret := nvmlDeviceGetCpuAffinity(Device, CpuSetSize, &CpuSet[0])
+	return CpuSet, ret
+}
+
+func (Device Device) GetCpuAffinity(NumCPUs int) ([]uint, Return) {
+	return DeviceGetCpuAffinity(Device, NumCPUs)
+}
+
+// nvml.DeviceSetCpuAffinity()
+func DeviceSetCpuAffinity(Device Device) Return {
+	return nvmlDeviceSetCpuAffinity(Device)
+}
+
+func (Device Device) SetCpuAffinity() Return {
+	return DeviceSetCpuAffinity(Device)
+}
+
+// nvml.DeviceClearCpuAffinity()
+func DeviceClearCpuAffinity(Device Device) Return {
+	return nvmlDeviceClearCpuAffinity(Device)
+}
+
+func (Device Device) ClearCpuAffinity() Return {
+	return DeviceClearCpuAffinity(Device)
+}
+
+// nvml.DeviceGetMemoryAffinity()
+func DeviceGetMemoryAffinity(Device Device, NumNodes int, Scope AffinityScope) ([]uint, Return) {
+	NodeSetSize := uint32((NumNodes-1)/int(unsafe.Sizeof(uint(0))) + 1)
+	NodeSet := make([]uint, NodeSetSize)
+	ret := nvmlDeviceGetMemoryAffinity(Device, NodeSetSize, &NodeSet[0], Scope)
+	return NodeSet, ret
+}
+
+func (Device Device) GetMemoryAffinity(NumNodes int, Scope AffinityScope) ([]uint, Return) {
+	return DeviceGetMemoryAffinity(Device, NumNodes, Scope)
+}
+
+// nvml.DeviceGetCpuAffinityWithinScope()
+func DeviceGetCpuAffinityWithinScope(Device Device, NumCPUs int, Scope AffinityScope) ([]uint, Return) {
+	CpuSetSize := uint32((NumCPUs-1)/int(unsafe.Sizeof(uint(0))) + 1)
+	CpuSet := make([]uint, CpuSetSize)
+	ret := nvmlDeviceGetCpuAffinityWithinScope(Device, CpuSetSize, &CpuSet[0], Scope)
+	return CpuSet, ret
+}
+
+func (Device Device) GetCpuAffinityWithinScope(NumCPUs int, Scope AffinityScope) ([]uint, Return) {
+	return DeviceGetCpuAffinityWithinScope(Device, NumCPUs, Scope)
+}
+
+// nvml.DeviceGetTopologyCommonAncestor()
+func DeviceGetTopologyCommonAncestor(Device1 Device, Device2 Device) (GpuTopologyLevel, Return) {
+	var PathInfo GpuTopologyLevel
+	ret := nvmlDeviceGetTopologyCommonAncestor(Device1, Device2, &PathInfo)
+	return PathInfo, ret
+}
+
+func (Device1 Device) GetTopologyCommonAncestor(Device2 Device) (GpuTopologyLevel, Return) {
+	return DeviceGetTopologyCommonAncestor(Device1, Device2)
+}
+
+// nvml.DeviceGetTopologyNearestGpus()
+func DeviceGetTopologyNearestGpus(device Device, Level GpuTopologyLevel) ([]Device, Return) {
+	var Count uint32
+	ret := nvmlDeviceGetTopologyNearestGpus(device, Level, &Count, nil)
+	if ret != SUCCESS {
+		return nil, ret
+	}
+	if Count == 0 {
+		return []Device{}, ret
+	}
+	DeviceArray := make([]Device, Count)
+	ret = nvmlDeviceGetTopologyNearestGpus(device, Level, &Count, &DeviceArray[0])
+	return DeviceArray, ret
+}
+
+func (Device Device) GetTopologyNearestGpus(Level GpuTopologyLevel) ([]Device, Return) {
+	return DeviceGetTopologyNearestGpus(Device, Level)
+}
+
+// nvml.DeviceGetP2PStatus()
+func DeviceGetP2PStatus(Device1 Device, Device2 Device, P2pIndex GpuP2PCapsIndex) (GpuP2PStatus, Return) {
+	var P2pStatus GpuP2PStatus
+	ret := nvmlDeviceGetP2PStatus(Device1, Device2, P2pIndex, &P2pStatus)
+	return P2pStatus, ret
+}
+
+func (Device1 Device) GetP2PStatus(Device2 Device, P2pIndex GpuP2PCapsIndex) (GpuP2PStatus, Return) {
+	return DeviceGetP2PStatus(Device1, Device2, P2pIndex)
+}
+
+// nvml.DeviceGetUUID()
+func DeviceGetUUID(Device Device) (string, Return) {
+	Uuid := make([]byte, DEVICE_UUID_V2_BUFFER_SIZE)
+	ret := nvmlDeviceGetUUID(Device, &Uuid[0], DEVICE_UUID_V2_BUFFER_SIZE)
+	return string(Uuid[:clen(Uuid)]), ret
+}
+
+func (Device Device) GetUUID() (string, Return) {
+	return DeviceGetUUID(Device)
+}
+
+// nvml.DeviceGetMinorNumber()
+func DeviceGetMinorNumber(Device Device) (int, Return) {
+	var MinorNumber uint32
+	ret := nvmlDeviceGetMinorNumber(Device, &MinorNumber)
+	return int(MinorNumber), ret
+}
+
+func (Device Device) GetMinorNumber() (int, Return) {
+	return DeviceGetMinorNumber(Device)
+}
+
+// nvml.DeviceGetBoardPartNumber()
+func DeviceGetBoardPartNumber(Device Device) (string, Return) {
+	PartNumber := make([]byte, DEVICE_PART_NUMBER_BUFFER_SIZE)
+	ret := nvmlDeviceGetBoardPartNumber(Device, &PartNumber[0], DEVICE_PART_NUMBER_BUFFER_SIZE)
+	return string(PartNumber[:clen(PartNumber)]), ret
+}
+
+func (Device Device) GetBoardPartNumber() (string, Return) {
+	return DeviceGetBoardPartNumber(Device)
+}
+
+// nvml.DeviceGetInforomVersion()
+func DeviceGetInforomVersion(Device Device, Object InforomObject) (string, Return) {
+	Version := make([]byte, DEVICE_INFOROM_VERSION_BUFFER_SIZE)
+	ret := nvmlDeviceGetInforomVersion(Device, Object, &Version[0], DEVICE_INFOROM_VERSION_BUFFER_SIZE)
+	return string(Version[:clen(Version)]),
ret +} + +func (Device Device) GetInforomVersion(Object InforomObject) (string, Return) { + return DeviceGetInforomVersion(Device, Object) +} + +// nvml.DeviceGetInforomImageVersion() +func DeviceGetInforomImageVersion(Device Device) (string, Return) { + Version := make([]byte, DEVICE_INFOROM_VERSION_BUFFER_SIZE) + ret := nvmlDeviceGetInforomImageVersion(Device, &Version[0], DEVICE_INFOROM_VERSION_BUFFER_SIZE) + return string(Version[:clen(Version)]), ret +} + +func (Device Device) GetInforomImageVersion() (string, Return) { + return DeviceGetInforomImageVersion(Device) +} + +// nvml.DeviceGetInforomConfigurationChecksum() +func DeviceGetInforomConfigurationChecksum(Device Device) (uint32, Return) { + var Checksum uint32 + ret := nvmlDeviceGetInforomConfigurationChecksum(Device, &Checksum) + return Checksum, ret +} + +func (Device Device) GetInforomConfigurationChecksum() (uint32, Return) { + return DeviceGetInforomConfigurationChecksum(Device) +} + +// nvml.DeviceValidateInforom() +func DeviceValidateInforom(Device Device) Return { + return nvmlDeviceValidateInforom(Device) +} + +func (Device Device) ValidateInforom() Return { + return DeviceValidateInforom(Device) +} + +// nvml.DeviceGetDisplayMode() +func DeviceGetDisplayMode(Device Device) (EnableState, Return) { + var Display EnableState + ret := nvmlDeviceGetDisplayMode(Device, &Display) + return Display, ret +} + +func (Device Device) GetDisplayMode() (EnableState, Return) { + return DeviceGetDisplayMode(Device) +} + +// nvml.DeviceGetDisplayActive() +func DeviceGetDisplayActive(Device Device) (EnableState, Return) { + var IsActive EnableState + ret := nvmlDeviceGetDisplayActive(Device, &IsActive) + return IsActive, ret +} + +func (Device Device) GetDisplayActive() (EnableState, Return) { + return DeviceGetDisplayActive(Device) +} + +// nvml.DeviceGetPersistenceMode() +func DeviceGetPersistenceMode(Device Device) (EnableState, Return) { + var Mode EnableState + ret := nvmlDeviceGetPersistenceMode(Device, &Mode) + return Mode, ret +} + +func (Device Device) GetPersistenceMode() (EnableState, Return) { + return DeviceGetPersistenceMode(Device) +} + +// nvml.DeviceGetPciInfo() +func DeviceGetPciInfo(Device Device) (PciInfo, Return) { + var Pci PciInfo + ret := nvmlDeviceGetPciInfo(Device, &Pci) + return Pci, ret +} + +func (Device Device) GetPciInfo() (PciInfo, Return) { + return DeviceGetPciInfo(Device) +} + +// nvml.DeviceGetMaxPcieLinkGeneration() +func DeviceGetMaxPcieLinkGeneration(Device Device) (int, Return) { + var MaxLinkGen uint32 + ret := nvmlDeviceGetMaxPcieLinkGeneration(Device, &MaxLinkGen) + return int(MaxLinkGen), ret +} + +func (Device Device) GetMaxPcieLinkGeneration() (int, Return) { + return DeviceGetMaxPcieLinkGeneration(Device) +} + +// nvml.DeviceGetMaxPcieLinkWidth() +func DeviceGetMaxPcieLinkWidth(Device Device) (int, Return) { + var MaxLinkWidth uint32 + ret := nvmlDeviceGetMaxPcieLinkWidth(Device, &MaxLinkWidth) + return int(MaxLinkWidth), ret +} + +func (Device Device) GetMaxPcieLinkWidth() (int, Return) { + return DeviceGetMaxPcieLinkWidth(Device) +} + +// nvml.DeviceGetCurrPcieLinkGeneration() +func DeviceGetCurrPcieLinkGeneration(Device Device) (int, Return) { + var CurrLinkGen uint32 + ret := nvmlDeviceGetCurrPcieLinkGeneration(Device, &CurrLinkGen) + return int(CurrLinkGen), ret +} + +func (Device Device) GetCurrPcieLinkGeneration() (int, Return) { + return DeviceGetCurrPcieLinkGeneration(Device) +} + +// nvml.DeviceGetCurrPcieLinkWidth() +func DeviceGetCurrPcieLinkWidth(Device Device) (int, Return) { + 
var CurrLinkWidth uint32 + ret := nvmlDeviceGetCurrPcieLinkWidth(Device, &CurrLinkWidth) + return int(CurrLinkWidth), ret +} + +func (Device Device) GetCurrPcieLinkWidth() (int, Return) { + return DeviceGetCurrPcieLinkWidth(Device) +} + +// nvml.DeviceGetPcieThroughput() +func DeviceGetPcieThroughput(Device Device, Counter PcieUtilCounter) (uint32, Return) { + var Value uint32 + ret := nvmlDeviceGetPcieThroughput(Device, Counter, &Value) + return Value, ret +} + +func (Device Device) GetPcieThroughput(Counter PcieUtilCounter) (uint32, Return) { + return DeviceGetPcieThroughput(Device, Counter) +} + +// nvml.DeviceGetPcieReplayCounter() +func DeviceGetPcieReplayCounter(Device Device) (int, Return) { + var Value uint32 + ret := nvmlDeviceGetPcieReplayCounter(Device, &Value) + return int(Value), ret +} + +func (Device Device) GetPcieReplayCounter() (int, Return) { + return DeviceGetPcieReplayCounter(Device) +} + +// nvml.nvmlDeviceGetClockInfo() +func DeviceGetClockInfo(Device Device, _type ClockType) (uint32, Return) { + var Clock uint32 + ret := nvmlDeviceGetClockInfo(Device, _type, &Clock) + return Clock, ret +} + +func (Device Device) GetClockInfo(_type ClockType) (uint32, Return) { + return DeviceGetClockInfo(Device, _type) +} + +// nvml.DeviceGetMaxClockInfo() +func DeviceGetMaxClockInfo(Device Device, _type ClockType) (uint32, Return) { + var Clock uint32 + ret := nvmlDeviceGetMaxClockInfo(Device, _type, &Clock) + return Clock, ret +} + +func (Device Device) GetMaxClockInfo(_type ClockType) (uint32, Return) { + return DeviceGetMaxClockInfo(Device, _type) +} + +// nvml.DeviceGetApplicationsClock() +func DeviceGetApplicationsClock(Device Device, ClockType ClockType) (uint32, Return) { + var ClockMHz uint32 + ret := nvmlDeviceGetApplicationsClock(Device, ClockType, &ClockMHz) + return ClockMHz, ret +} + +func (Device Device) GetApplicationsClock(ClockType ClockType) (uint32, Return) { + return DeviceGetApplicationsClock(Device, ClockType) +} + +// nvml.DeviceGetDefaultApplicationsClock() +func DeviceGetDefaultApplicationsClock(Device Device, ClockType ClockType) (uint32, Return) { + var ClockMHz uint32 + ret := nvmlDeviceGetDefaultApplicationsClock(Device, ClockType, &ClockMHz) + return ClockMHz, ret +} + +func (Device Device) GetDefaultApplicationsClock(ClockType ClockType) (uint32, Return) { + return DeviceGetDefaultApplicationsClock(Device, ClockType) +} + +// nvml.DeviceResetApplicationsClocks() +func DeviceResetApplicationsClocks(Device Device) Return { + return nvmlDeviceResetApplicationsClocks(Device) +} + +func (Device Device) ResetApplicationsClocks() Return { + return DeviceResetApplicationsClocks(Device) +} + +// nvml.DeviceGetClock() +func DeviceGetClock(Device Device, ClockType ClockType, ClockId ClockId) (uint32, Return) { + var ClockMHz uint32 + ret := nvmlDeviceGetClock(Device, ClockType, ClockId, &ClockMHz) + return ClockMHz, ret +} + +func (Device Device) GetClock(ClockType ClockType, ClockId ClockId) (uint32, Return) { + return DeviceGetClock(Device, ClockType, ClockId) +} + +// nvml.DeviceGetMaxCustomerBoostClock() +func DeviceGetMaxCustomerBoostClock(Device Device, ClockType ClockType) (uint32, Return) { + var ClockMHz uint32 + ret := nvmlDeviceGetMaxCustomerBoostClock(Device, ClockType, &ClockMHz) + return ClockMHz, ret +} + +func (Device Device) GetMaxCustomerBoostClock(ClockType ClockType) (uint32, Return) { + return DeviceGetMaxCustomerBoostClock(Device, ClockType) +} + +// nvml.DeviceGetSupportedMemoryClocks() +func DeviceGetSupportedMemoryClocks(Device Device) 
(int, uint32, Return) { + var Count, ClocksMHz uint32 + ret := nvmlDeviceGetSupportedMemoryClocks(Device, &Count, &ClocksMHz) + return int(Count), ClocksMHz, ret +} + +func (Device Device) GetSupportedMemoryClocks() (int, uint32, Return) { + return DeviceGetSupportedMemoryClocks(Device) +} + +// nvml.DeviceGetSupportedGraphicsClocks() +func DeviceGetSupportedGraphicsClocks(Device Device, MemoryClockMHz int) (int, uint32, Return) { + var Count, ClocksMHz uint32 + ret := nvmlDeviceGetSupportedGraphicsClocks(Device, uint32(MemoryClockMHz), &Count, &ClocksMHz) + return int(Count), ClocksMHz, ret +} + +func (Device Device) GetSupportedGraphicsClocks(MemoryClockMHz int) (int, uint32, Return) { + return DeviceGetSupportedGraphicsClocks(Device, MemoryClockMHz) +} + +// nvml.DeviceGetAutoBoostedClocksEnabled() +func DeviceGetAutoBoostedClocksEnabled(Device Device) (EnableState, EnableState, Return) { + var IsEnabled, DefaultIsEnabled EnableState + ret := nvmlDeviceGetAutoBoostedClocksEnabled(Device, &IsEnabled, &DefaultIsEnabled) + return IsEnabled, DefaultIsEnabled, ret +} + +func (Device Device) GetAutoBoostedClocksEnabled() (EnableState, EnableState, Return) { + return DeviceGetAutoBoostedClocksEnabled(Device) +} + +// nvml.DeviceSetAutoBoostedClocksEnabled() +func DeviceSetAutoBoostedClocksEnabled(Device Device, Enabled EnableState) Return { + return nvmlDeviceSetAutoBoostedClocksEnabled(Device, Enabled) +} + +func (Device Device) SetAutoBoostedClocksEnabled(Enabled EnableState) Return { + return DeviceSetAutoBoostedClocksEnabled(Device, Enabled) +} + +// nvml.DeviceSetDefaultAutoBoostedClocksEnabled() +func DeviceSetDefaultAutoBoostedClocksEnabled(Device Device, Enabled EnableState, Flags uint32) Return { + return nvmlDeviceSetDefaultAutoBoostedClocksEnabled(Device, Enabled, Flags) +} + +func (Device Device) SetDefaultAutoBoostedClocksEnabled(Enabled EnableState, Flags uint32) Return { + return DeviceSetDefaultAutoBoostedClocksEnabled(Device, Enabled, Flags) +} + +// nvml.DeviceGetFanSpeed() +func DeviceGetFanSpeed(Device Device) (uint32, Return) { + var Speed uint32 + ret := nvmlDeviceGetFanSpeed(Device, &Speed) + return Speed, ret +} + +func (Device Device) GetFanSpeed() (uint32, Return) { + return DeviceGetFanSpeed(Device) +} + +// nvml.DeviceGetFanSpeed_v2() +func DeviceGetFanSpeed_v2(Device Device, Fan int) (uint32, Return) { + var Speed uint32 + ret := nvmlDeviceGetFanSpeed_v2(Device, uint32(Fan), &Speed) + return Speed, ret +} + +func (Device Device) GetFanSpeed_v2(Fan int) (uint32, Return) { + return DeviceGetFanSpeed_v2(Device, Fan) +} + +// nvml.DeviceGetNumFans() +func DeviceGetNumFans(Device Device) (int, Return) { + var NumFans uint32 + ret := nvmlDeviceGetNumFans(Device, &NumFans) + return int(NumFans), ret +} + +func (Device Device) GetNumFans() (int, Return) { + return DeviceGetNumFans(Device) +} + +// nvml.DeviceGetTemperature() +func DeviceGetTemperature(Device Device, SensorType TemperatureSensors) (uint32, Return) { + var Temp uint32 + ret := nvmlDeviceGetTemperature(Device, SensorType, &Temp) + return Temp, ret +} + +func (Device Device) GetTemperature(SensorType TemperatureSensors) (uint32, Return) { + return DeviceGetTemperature(Device, SensorType) +} + +// nvml.DeviceGetTemperatureThreshold() +func DeviceGetTemperatureThreshold(Device Device, ThresholdType TemperatureThresholds) (uint32, Return) { + var Temp uint32 + ret := nvmlDeviceGetTemperatureThreshold(Device, ThresholdType, &Temp) + return Temp, ret +} + +func (Device Device) GetTemperatureThreshold(ThresholdType 
TemperatureThresholds) (uint32, Return) { + return DeviceGetTemperatureThreshold(Device, ThresholdType) +} + +// nvml.DeviceSetTemperatureThreshold() +func DeviceSetTemperatureThreshold(Device Device, ThresholdType TemperatureThresholds, Temp int) Return { + t := int32(Temp) + ret := nvmlDeviceSetTemperatureThreshold(Device, ThresholdType, &t) + return ret +} + +func (Device Device) SetTemperatureThreshold(ThresholdType TemperatureThresholds, Temp int) Return { + return DeviceSetTemperatureThreshold(Device, ThresholdType, Temp) +} + +// nvml.DeviceGetPerformanceState() +func DeviceGetPerformanceState(Device Device) (Pstates, Return) { + var PState Pstates + ret := nvmlDeviceGetPerformanceState(Device, &PState) + return PState, ret +} + +func (Device Device) GetPerformanceState() (Pstates, Return) { + return DeviceGetPerformanceState(Device) +} + +// nvml.DeviceGetCurrentClocksThrottleReasons() +func DeviceGetCurrentClocksThrottleReasons(Device Device) (uint64, Return) { + var ClocksThrottleReasons uint64 + ret := nvmlDeviceGetCurrentClocksThrottleReasons(Device, &ClocksThrottleReasons) + return ClocksThrottleReasons, ret +} + +func (Device Device) GetCurrentClocksThrottleReasons() (uint64, Return) { + return DeviceGetCurrentClocksThrottleReasons(Device) +} + +// nvml.DeviceGetSupportedClocksThrottleReasons() +func DeviceGetSupportedClocksThrottleReasons(Device Device) (uint64, Return) { + var SupportedClocksThrottleReasons uint64 + ret := nvmlDeviceGetSupportedClocksThrottleReasons(Device, &SupportedClocksThrottleReasons) + return SupportedClocksThrottleReasons, ret +} + +func (Device Device) GetSupportedClocksThrottleReasons() (uint64, Return) { + return DeviceGetSupportedClocksThrottleReasons(Device) +} + +// nvml.DeviceGetPowerState() +func DeviceGetPowerState(Device Device) (Pstates, Return) { + var PState Pstates + ret := nvmlDeviceGetPowerState(Device, &PState) + return PState, ret +} + +func (Device Device) GetPowerState() (Pstates, Return) { + return DeviceGetPowerState(Device) +} + +// nvml.DeviceGetPowerManagementMode() +func DeviceGetPowerManagementMode(Device Device) (EnableState, Return) { + var Mode EnableState + ret := nvmlDeviceGetPowerManagementMode(Device, &Mode) + return Mode, ret +} + +func (Device Device) GetPowerManagementMode() (EnableState, Return) { + return DeviceGetPowerManagementMode(Device) +} + +// nvml.DeviceGetPowerManagementLimit() +func DeviceGetPowerManagementLimit(Device Device) (uint32, Return) { + var Limit uint32 + ret := nvmlDeviceGetPowerManagementLimit(Device, &Limit) + return Limit, ret +} + +func (Device Device) GetPowerManagementLimit() (uint32, Return) { + return DeviceGetPowerManagementLimit(Device) +} + +// nvml.DeviceGetPowerManagementLimitConstraints() +func DeviceGetPowerManagementLimitConstraints(Device Device) (uint32, uint32, Return) { + var MinLimit, MaxLimit uint32 + ret := nvmlDeviceGetPowerManagementLimitConstraints(Device, &MinLimit, &MaxLimit) + return MinLimit, MaxLimit, ret +} + +func (Device Device) GetPowerManagementLimitConstraints() (uint32, uint32, Return) { + return DeviceGetPowerManagementLimitConstraints(Device) +} + +// nvml.DeviceGetPowerManagementDefaultLimit() +func DeviceGetPowerManagementDefaultLimit(Device Device) (uint32, Return) { + var DefaultLimit uint32 + ret := nvmlDeviceGetPowerManagementDefaultLimit(Device, &DefaultLimit) + return DefaultLimit, ret +} + +func (Device Device) GetPowerManagementDefaultLimit() (uint32, Return) { + return DeviceGetPowerManagementDefaultLimit(Device) +} + +// 
nvml.DeviceGetPowerUsage() +func DeviceGetPowerUsage(Device Device) (uint32, Return) { + var Power uint32 + ret := nvmlDeviceGetPowerUsage(Device, &Power) + return Power, ret +} + +func (Device Device) GetPowerUsage() (uint32, Return) { + return DeviceGetPowerUsage(Device) +} + +// nvml.DeviceGetTotalEnergyConsumption() +func DeviceGetTotalEnergyConsumption(Device Device) (uint64, Return) { + var Energy uint64 + ret := nvmlDeviceGetTotalEnergyConsumption(Device, &Energy) + return Energy, ret +} + +func (Device Device) GetTotalEnergyConsumption() (uint64, Return) { + return DeviceGetTotalEnergyConsumption(Device) +} + +// nvml.DeviceGetEnforcedPowerLimit() +func DeviceGetEnforcedPowerLimit(Device Device) (uint32, Return) { + var Limit uint32 + ret := nvmlDeviceGetEnforcedPowerLimit(Device, &Limit) + return Limit, ret +} + +func (Device Device) GetEnforcedPowerLimit() (uint32, Return) { + return DeviceGetEnforcedPowerLimit(Device) +} + +// nvml.DeviceGetGpuOperationMode() +func DeviceGetGpuOperationMode(Device Device) (GpuOperationMode, GpuOperationMode, Return) { + var Current, Pending GpuOperationMode + ret := nvmlDeviceGetGpuOperationMode(Device, &Current, &Pending) + return Current, Pending, ret +} + +func (Device Device) GetGpuOperationMode() (GpuOperationMode, GpuOperationMode, Return) { + return DeviceGetGpuOperationMode(Device) +} + +// nvml.DeviceGetMemoryInfo() +func DeviceGetMemoryInfo(Device Device) (Memory, Return) { + var Memory Memory + ret := nvmlDeviceGetMemoryInfo(Device, &Memory) + return Memory, ret +} + +func (Device Device) GetMemoryInfo() (Memory, Return) { + return DeviceGetMemoryInfo(Device) +} + +// nvml.DeviceGetMemoryInfo_v2() +func DeviceGetMemoryInfo_v2(Device Device) (Memory_v2, Return) { + var Memory Memory_v2 + Memory.Version = STRUCT_VERSION(Memory, 2) + ret := nvmlDeviceGetMemoryInfo_v2(Device, &Memory) + return Memory, ret +} + +func (Device Device) GetMemoryInfo_v2() (Memory_v2, Return) { + return DeviceGetMemoryInfo_v2(Device) +} + +// nvml.DeviceGetComputeMode() +func DeviceGetComputeMode(Device Device) (ComputeMode, Return) { + var Mode ComputeMode + ret := nvmlDeviceGetComputeMode(Device, &Mode) + return Mode, ret +} + +func (Device Device) GetComputeMode() (ComputeMode, Return) { + return DeviceGetComputeMode(Device) +} + +// nvml.DeviceGetCudaComputeCapability() +func DeviceGetCudaComputeCapability(Device Device) (int, int, Return) { + var Major, Minor int32 + ret := nvmlDeviceGetCudaComputeCapability(Device, &Major, &Minor) + return int(Major), int(Minor), ret +} + +func (Device Device) GetCudaComputeCapability() (int, int, Return) { + return DeviceGetCudaComputeCapability(Device) +} + +// nvml.DeviceGetEccMode() +func DeviceGetEccMode(Device Device) (EnableState, EnableState, Return) { + var Current, Pending EnableState + ret := nvmlDeviceGetEccMode(Device, &Current, &Pending) + return Current, Pending, ret +} + +func (Device Device) GetEccMode() (EnableState, EnableState, Return) { + return DeviceGetEccMode(Device) +} + +// nvml.DeviceGetBoardId() +func DeviceGetBoardId(Device Device) (uint32, Return) { + var BoardId uint32 + ret := nvmlDeviceGetBoardId(Device, &BoardId) + return BoardId, ret +} + +func (Device Device) GetBoardId() (uint32, Return) { + return DeviceGetBoardId(Device) +} + +// nvml.DeviceGetMultiGpuBoard() +func DeviceGetMultiGpuBoard(Device Device) (int, Return) { + var MultiGpuBool uint32 + ret := nvmlDeviceGetMultiGpuBoard(Device, &MultiGpuBool) + return int(MultiGpuBool), ret +} + +func (Device Device) GetMultiGpuBoard() (int, 
Return) { + return DeviceGetMultiGpuBoard(Device) +} + +// nvml.DeviceGetTotalEccErrors() +func DeviceGetTotalEccErrors(Device Device, ErrorType MemoryErrorType, CounterType EccCounterType) (uint64, Return) { + var EccCounts uint64 + ret := nvmlDeviceGetTotalEccErrors(Device, ErrorType, CounterType, &EccCounts) + return EccCounts, ret +} + +func (Device Device) GetTotalEccErrors(ErrorType MemoryErrorType, CounterType EccCounterType) (uint64, Return) { + return DeviceGetTotalEccErrors(Device, ErrorType, CounterType) +} + +// nvml.DeviceGetDetailedEccErrors() +func DeviceGetDetailedEccErrors(Device Device, ErrorType MemoryErrorType, CounterType EccCounterType) (EccErrorCounts, Return) { + var EccCounts EccErrorCounts + ret := nvmlDeviceGetDetailedEccErrors(Device, ErrorType, CounterType, &EccCounts) + return EccCounts, ret +} + +func (Device Device) GetDetailedEccErrors(ErrorType MemoryErrorType, CounterType EccCounterType) (EccErrorCounts, Return) { + return DeviceGetDetailedEccErrors(Device, ErrorType, CounterType) +} + +// nvml.DeviceGetMemoryErrorCounter() +func DeviceGetMemoryErrorCounter(Device Device, ErrorType MemoryErrorType, CounterType EccCounterType, LocationType MemoryLocation) (uint64, Return) { + var Count uint64 + ret := nvmlDeviceGetMemoryErrorCounter(Device, ErrorType, CounterType, LocationType, &Count) + return Count, ret +} + +func (Device Device) GetMemoryErrorCounter(ErrorType MemoryErrorType, CounterType EccCounterType, LocationType MemoryLocation) (uint64, Return) { + return DeviceGetMemoryErrorCounter(Device, ErrorType, CounterType, LocationType) +} + +// nvml.DeviceGetUtilizationRates() +func DeviceGetUtilizationRates(Device Device) (Utilization, Return) { + var Utilization Utilization + ret := nvmlDeviceGetUtilizationRates(Device, &Utilization) + return Utilization, ret +} + +func (Device Device) GetUtilizationRates() (Utilization, Return) { + return DeviceGetUtilizationRates(Device) +} + +// nvml.DeviceGetEncoderUtilization() +func DeviceGetEncoderUtilization(Device Device) (uint32, uint32, Return) { + var Utilization, SamplingPeriodUs uint32 + ret := nvmlDeviceGetEncoderUtilization(Device, &Utilization, &SamplingPeriodUs) + return Utilization, SamplingPeriodUs, ret +} + +func (Device Device) GetEncoderUtilization() (uint32, uint32, Return) { + return DeviceGetEncoderUtilization(Device) +} + +// nvml.DeviceGetEncoderCapacity() +func DeviceGetEncoderCapacity(Device Device, EncoderQueryType EncoderType) (int, Return) { + var EncoderCapacity uint32 + ret := nvmlDeviceGetEncoderCapacity(Device, EncoderQueryType, &EncoderCapacity) + return int(EncoderCapacity), ret +} + +func (Device Device) GetEncoderCapacity(EncoderQueryType EncoderType) (int, Return) { + return DeviceGetEncoderCapacity(Device, EncoderQueryType) +} + +// nvml.DeviceGetEncoderStats() +func DeviceGetEncoderStats(Device Device) (int, uint32, uint32, Return) { + var SessionCount, AverageFps, AverageLatency uint32 + ret := nvmlDeviceGetEncoderStats(Device, &SessionCount, &AverageFps, &AverageLatency) + return int(SessionCount), AverageFps, AverageLatency, ret +} + +func (Device Device) GetEncoderStats() (int, uint32, uint32, Return) { + return DeviceGetEncoderStats(Device) +} + +// nvml.DeviceGetEncoderSessions() +func DeviceGetEncoderSessions(Device Device) ([]EncoderSessionInfo, Return) { + var SessionCount uint32 = 1 // Will be reduced upon returning + for { + SessionInfos := make([]EncoderSessionInfo, SessionCount) + ret := nvmlDeviceGetEncoderSessions(Device, &SessionCount, &SessionInfos[0]) + if ret 
== SUCCESS { + return SessionInfos[:SessionCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + SessionCount *= 2 + } +} + +func (Device Device) GetEncoderSessions() ([]EncoderSessionInfo, Return) { + return DeviceGetEncoderSessions(Device) +} + +// nvml.DeviceGetDecoderUtilization() +func DeviceGetDecoderUtilization(Device Device) (uint32, uint32, Return) { + var Utilization, SamplingPeriodUs uint32 + ret := nvmlDeviceGetDecoderUtilization(Device, &Utilization, &SamplingPeriodUs) + return Utilization, SamplingPeriodUs, ret +} + +func (Device Device) GetDecoderUtilization() (uint32, uint32, Return) { + return DeviceGetDecoderUtilization(Device) +} + +// nvml.DeviceGetFBCStats() +func DeviceGetFBCStats(Device Device) (FBCStats, Return) { + var FbcStats FBCStats + ret := nvmlDeviceGetFBCStats(Device, &FbcStats) + return FbcStats, ret +} + +func (Device Device) GetFBCStats() (FBCStats, Return) { + return DeviceGetFBCStats(Device) +} + +// nvml.DeviceGetFBCSessions() +func DeviceGetFBCSessions(Device Device) ([]FBCSessionInfo, Return) { + var SessionCount uint32 = 1 // Will be reduced upon returning + for { + SessionInfo := make([]FBCSessionInfo, SessionCount) + ret := nvmlDeviceGetFBCSessions(Device, &SessionCount, &SessionInfo[0]) + if ret == SUCCESS { + return SessionInfo[:SessionCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + SessionCount *= 2 + } +} + +func (Device Device) GetFBCSessions() ([]FBCSessionInfo, Return) { + return DeviceGetFBCSessions(Device) +} + +// nvml.DeviceGetDriverModel() +func DeviceGetDriverModel(Device Device) (DriverModel, DriverModel, Return) { + var Current, Pending DriverModel + ret := nvmlDeviceGetDriverModel(Device, &Current, &Pending) + return Current, Pending, ret +} + +func (Device Device) GetDriverModel() (DriverModel, DriverModel, Return) { + return DeviceGetDriverModel(Device) +} + +// nvml.DeviceGetVbiosVersion() +func DeviceGetVbiosVersion(Device Device) (string, Return) { + Version := make([]byte, DEVICE_VBIOS_VERSION_BUFFER_SIZE) + ret := nvmlDeviceGetVbiosVersion(Device, &Version[0], DEVICE_VBIOS_VERSION_BUFFER_SIZE) + return string(Version[:clen(Version)]), ret +} + +func (Device Device) GetVbiosVersion() (string, Return) { + return DeviceGetVbiosVersion(Device) +} + +// nvml.DeviceGetBridgeChipInfo() +func DeviceGetBridgeChipInfo(Device Device) (BridgeChipHierarchy, Return) { + var BridgeHierarchy BridgeChipHierarchy + ret := nvmlDeviceGetBridgeChipInfo(Device, &BridgeHierarchy) + return BridgeHierarchy, ret +} + +func (Device Device) GetBridgeChipInfo() (BridgeChipHierarchy, Return) { + return DeviceGetBridgeChipInfo(Device) +} + +// nvml.DeviceGetComputeRunningProcesses() +func deviceGetComputeRunningProcesses_v1(Device Device) ([]ProcessInfo, Return) { + var InfoCount uint32 = 1 // Will be reduced upon returning + for { + Infos := make([]ProcessInfo_v1, InfoCount) + ret := nvmlDeviceGetComputeRunningProcesses_v1(Device, &InfoCount, &Infos[0]) + if ret == SUCCESS { + return ProcessInfo_v1Slice(Infos[:InfoCount]).ToProcessInfoSlice(), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + InfoCount *= 2 + } +} + +func deviceGetComputeRunningProcesses_v2(Device Device) ([]ProcessInfo, Return) { + var InfoCount uint32 = 1 // Will be reduced upon returning + for { + Infos := make([]ProcessInfo_v2, InfoCount) + ret := nvmlDeviceGetComputeRunningProcesses_v2(Device, &InfoCount, &Infos[0]) + if ret == SUCCESS { + return ProcessInfo_v2Slice(Infos[:InfoCount]).ToProcessInfoSlice(), ret 
+ } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + InfoCount *= 2 + } +} + +func deviceGetComputeRunningProcesses_v3(Device Device) ([]ProcessInfo, Return) { + var InfoCount uint32 = 1 // Will be reduced upon returning + for { + Infos := make([]ProcessInfo, InfoCount) + ret := nvmlDeviceGetComputeRunningProcesses_v3(Device, &InfoCount, &Infos[0]) + if ret == SUCCESS { + return Infos[:InfoCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + InfoCount *= 2 + } +} + +func (Device Device) GetComputeRunningProcesses() ([]ProcessInfo, Return) { + return DeviceGetComputeRunningProcesses(Device) +} + +// nvml.DeviceGetGraphicsRunningProcesses() +func deviceGetGraphicsRunningProcesses_v1(Device Device) ([]ProcessInfo, Return) { + var InfoCount uint32 = 1 // Will be reduced upon returning + for { + Infos := make([]ProcessInfo_v1, InfoCount) + ret := nvmlDeviceGetGraphicsRunningProcesses_v1(Device, &InfoCount, &Infos[0]) + if ret == SUCCESS { + return ProcessInfo_v1Slice(Infos[:InfoCount]).ToProcessInfoSlice(), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + InfoCount *= 2 + } +} + +func deviceGetGraphicsRunningProcesses_v2(Device Device) ([]ProcessInfo, Return) { + var InfoCount uint32 = 1 // Will be reduced upon returning + for { + Infos := make([]ProcessInfo_v2, InfoCount) + ret := nvmlDeviceGetGraphicsRunningProcesses_v2(Device, &InfoCount, &Infos[0]) + if ret == SUCCESS { + return ProcessInfo_v2Slice(Infos[:InfoCount]).ToProcessInfoSlice(), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + InfoCount *= 2 + } +} + +func deviceGetGraphicsRunningProcesses_v3(Device Device) ([]ProcessInfo, Return) { + var InfoCount uint32 = 1 // Will be reduced upon returning + for { + Infos := make([]ProcessInfo, InfoCount) + ret := nvmlDeviceGetGraphicsRunningProcesses_v3(Device, &InfoCount, &Infos[0]) + if ret == SUCCESS { + return Infos[:InfoCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + InfoCount *= 2 + } +} + +func (Device Device) GetGraphicsRunningProcesses() ([]ProcessInfo, Return) { + return DeviceGetGraphicsRunningProcesses(Device) +} + +// nvml.DeviceGetMPSComputeRunningProcesses() +func deviceGetMPSComputeRunningProcesses_v1(Device Device) ([]ProcessInfo, Return) { + var InfoCount uint32 = 1 // Will be reduced upon returning + for { + Infos := make([]ProcessInfo_v1, InfoCount) + ret := nvmlDeviceGetMPSComputeRunningProcesses_v1(Device, &InfoCount, &Infos[0]) + if ret == SUCCESS { + return ProcessInfo_v1Slice(Infos[:InfoCount]).ToProcessInfoSlice(), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + InfoCount *= 2 + } +} + +func deviceGetMPSComputeRunningProcesses_v2(Device Device) ([]ProcessInfo, Return) { + var InfoCount uint32 = 1 // Will be reduced upon returning + for { + Infos := make([]ProcessInfo_v2, InfoCount) + ret := nvmlDeviceGetMPSComputeRunningProcesses_v2(Device, &InfoCount, &Infos[0]) + if ret == SUCCESS { + return ProcessInfo_v2Slice(Infos[:InfoCount]).ToProcessInfoSlice(), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + InfoCount *= 2 + } +} + +func deviceGetMPSComputeRunningProcesses_v3(Device Device) ([]ProcessInfo, Return) { + var InfoCount uint32 = 1 // Will be reduced upon returning + for { + Infos := make([]ProcessInfo, InfoCount) + ret := nvmlDeviceGetMPSComputeRunningProcesses_v3(Device, &InfoCount, &Infos[0]) + if ret == SUCCESS { + return Infos[:InfoCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + 
InfoCount *= 2 + } +} + +func (Device Device) GetMPSComputeRunningProcesses() ([]ProcessInfo, Return) { + return DeviceGetMPSComputeRunningProcesses(Device) +} + +// nvml.DeviceOnSameBoard() +func DeviceOnSameBoard(Device1 Device, Device2 Device) (int, Return) { + var OnSameBoard int32 + ret := nvmlDeviceOnSameBoard(Device1, Device2, &OnSameBoard) + return int(OnSameBoard), ret +} + +func (Device1 Device) OnSameBoard(Device2 Device) (int, Return) { + return DeviceOnSameBoard(Device1, Device2) +} + +// nvml.DeviceGetAPIRestriction() +func DeviceGetAPIRestriction(Device Device, ApiType RestrictedAPI) (EnableState, Return) { + var IsRestricted EnableState + ret := nvmlDeviceGetAPIRestriction(Device, ApiType, &IsRestricted) + return IsRestricted, ret +} + +func (Device Device) GetAPIRestriction(ApiType RestrictedAPI) (EnableState, Return) { + return DeviceGetAPIRestriction(Device, ApiType) +} + +// nvml.DeviceGetSamples() +func DeviceGetSamples(Device Device, _type SamplingType, LastSeenTimeStamp uint64) (ValueType, []Sample, Return) { + var SampleValType ValueType + var SampleCount uint32 + ret := nvmlDeviceGetSamples(Device, _type, LastSeenTimeStamp, &SampleValType, &SampleCount, nil) + if ret != SUCCESS { + return SampleValType, nil, ret + } + if SampleCount == 0 { + return SampleValType, []Sample{}, ret + } + Samples := make([]Sample, SampleCount) + ret = nvmlDeviceGetSamples(Device, _type, LastSeenTimeStamp, &SampleValType, &SampleCount, &Samples[0]) + return SampleValType, Samples, ret +} + +func (Device Device) GetSamples(_type SamplingType, LastSeenTimeStamp uint64) (ValueType, []Sample, Return) { + return DeviceGetSamples(Device, _type, LastSeenTimeStamp) +} + +// nvml.DeviceGetBAR1MemoryInfo() +func DeviceGetBAR1MemoryInfo(Device Device) (BAR1Memory, Return) { + var Bar1Memory BAR1Memory + ret := nvmlDeviceGetBAR1MemoryInfo(Device, &Bar1Memory) + return Bar1Memory, ret +} + +func (Device Device) GetBAR1MemoryInfo() (BAR1Memory, Return) { + return DeviceGetBAR1MemoryInfo(Device) +} + +// nvml.DeviceGetViolationStatus() +func DeviceGetViolationStatus(Device Device, PerfPolicyType PerfPolicyType) (ViolationTime, Return) { + var ViolTime ViolationTime + ret := nvmlDeviceGetViolationStatus(Device, PerfPolicyType, &ViolTime) + return ViolTime, ret +} + +func (Device Device) GetViolationStatus(PerfPolicyType PerfPolicyType) (ViolationTime, Return) { + return DeviceGetViolationStatus(Device, PerfPolicyType) +} + +// nvml.DeviceGetIrqNum() +func DeviceGetIrqNum(Device Device) (int, Return) { + var IrqNum uint32 + ret := nvmlDeviceGetIrqNum(Device, &IrqNum) + return int(IrqNum), ret +} + +func (Device Device) GetIrqNum() (int, Return) { + return DeviceGetIrqNum(Device) +} + +// nvml.DeviceGetNumGpuCores() +func DeviceGetNumGpuCores(Device Device) (int, Return) { + var NumCores uint32 + ret := nvmlDeviceGetNumGpuCores(Device, &NumCores) + return int(NumCores), ret +} + +func (Device Device) GetNumGpuCores() (int, Return) { + return DeviceGetNumGpuCores(Device) +} + +// nvml.DeviceGetPowerSource() +func DeviceGetPowerSource(Device Device) (PowerSource, Return) { + var PowerSource PowerSource + ret := nvmlDeviceGetPowerSource(Device, &PowerSource) + return PowerSource, ret +} + +func (Device Device) GetPowerSource() (PowerSource, Return) { + return DeviceGetPowerSource(Device) +} + +// nvml.DeviceGetMemoryBusWidth() +func DeviceGetMemoryBusWidth(Device Device) (uint32, Return) { + var BusWidth uint32 + ret := nvmlDeviceGetMemoryBusWidth(Device, &BusWidth) + return BusWidth, ret +} + +func (Device 
Device) GetMemoryBusWidth() (uint32, Return) { + return DeviceGetMemoryBusWidth(Device) +} + +// nvml.DeviceGetPcieLinkMaxSpeed() +func DeviceGetPcieLinkMaxSpeed(Device Device) (uint32, Return) { + var MaxSpeed uint32 + ret := nvmlDeviceGetPcieLinkMaxSpeed(Device, &MaxSpeed) + return MaxSpeed, ret +} + +func (Device Device) GetPcieLinkMaxSpeed() (uint32, Return) { + return DeviceGetPcieLinkMaxSpeed(Device) +} + +// nvml.DeviceGetAdaptiveClockInfoStatus() +func DeviceGetAdaptiveClockInfoStatus(Device Device) (uint32, Return) { + var AdaptiveClockStatus uint32 + ret := nvmlDeviceGetAdaptiveClockInfoStatus(Device, &AdaptiveClockStatus) + return AdaptiveClockStatus, ret +} + +func (Device Device) GetAdaptiveClockInfoStatus() (uint32, Return) { + return DeviceGetAdaptiveClockInfoStatus(Device) +} + +// nvml.DeviceGetAccountingMode() +func DeviceGetAccountingMode(Device Device) (EnableState, Return) { + var Mode EnableState + ret := nvmlDeviceGetAccountingMode(Device, &Mode) + return Mode, ret +} + +func (Device Device) GetAccountingMode() (EnableState, Return) { + return DeviceGetAccountingMode(Device) +} + +// nvml.DeviceGetAccountingStats() +func DeviceGetAccountingStats(Device Device, Pid uint32) (AccountingStats, Return) { + var Stats AccountingStats + ret := nvmlDeviceGetAccountingStats(Device, Pid, &Stats) + return Stats, ret +} + +func (Device Device) GetAccountingStats(Pid uint32) (AccountingStats, Return) { + return DeviceGetAccountingStats(Device, Pid) +} + +// nvml.DeviceGetAccountingPids() +func DeviceGetAccountingPids(Device Device) ([]int, Return) { + var Count uint32 = 1 // Will be reduced upon returning + for { + Pids := make([]uint32, Count) + ret := nvmlDeviceGetAccountingPids(Device, &Count, &Pids[0]) + if ret == SUCCESS { + return uint32SliceToIntSlice(Pids[:Count]), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + Count *= 2 + } +} + +func (Device Device) GetAccountingPids() ([]int, Return) { + return DeviceGetAccountingPids(Device) +} + +// nvml.DeviceGetAccountingBufferSize() +func DeviceGetAccountingBufferSize(Device Device) (int, Return) { + var BufferSize uint32 + ret := nvmlDeviceGetAccountingBufferSize(Device, &BufferSize) + return int(BufferSize), ret +} + +func (Device Device) GetAccountingBufferSize() (int, Return) { + return DeviceGetAccountingBufferSize(Device) +} + +// nvml.DeviceGetRetiredPages() +func DeviceGetRetiredPages(Device Device, Cause PageRetirementCause) ([]uint64, Return) { + var PageCount uint32 = 1 // Will be reduced upon returning + for { + Addresses := make([]uint64, PageCount) + ret := nvmlDeviceGetRetiredPages(Device, Cause, &PageCount, &Addresses[0]) + if ret == SUCCESS { + return Addresses[:PageCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + PageCount *= 2 + } +} + +func (Device Device) GetRetiredPages(Cause PageRetirementCause) ([]uint64, Return) { + return DeviceGetRetiredPages(Device, Cause) +} + +// nvml.DeviceGetRetiredPages_v2() +func DeviceGetRetiredPages_v2(Device Device, Cause PageRetirementCause) ([]uint64, []uint64, Return) { + var PageCount uint32 = 1 // Will be reduced upon returning + for { + Addresses := make([]uint64, PageCount) + Timestamps := make([]uint64, PageCount) + ret := nvmlDeviceGetRetiredPages_v2(Device, Cause, &PageCount, &Addresses[0], &Timestamps[0]) + if ret == SUCCESS { + return Addresses[:PageCount], Timestamps[:PageCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, nil, ret + } + PageCount *= 2 + } +} + +func (Device Device) 
GetRetiredPages_v2(Cause PageRetirementCause) ([]uint64, []uint64, Return) { + return DeviceGetRetiredPages_v2(Device, Cause) +} + +// nvml.DeviceGetRetiredPagesPendingStatus() +func DeviceGetRetiredPagesPendingStatus(Device Device) (EnableState, Return) { + var IsPending EnableState + ret := nvmlDeviceGetRetiredPagesPendingStatus(Device, &IsPending) + return IsPending, ret +} + +func (Device Device) GetRetiredPagesPendingStatus() (EnableState, Return) { + return DeviceGetRetiredPagesPendingStatus(Device) +} + +// nvml.DeviceSetPersistenceMode() +func DeviceSetPersistenceMode(Device Device, Mode EnableState) Return { + return nvmlDeviceSetPersistenceMode(Device, Mode) +} + +func (Device Device) SetPersistenceMode(Mode EnableState) Return { + return DeviceSetPersistenceMode(Device, Mode) +} + +// nvml.DeviceSetComputeMode() +func DeviceSetComputeMode(Device Device, Mode ComputeMode) Return { + return nvmlDeviceSetComputeMode(Device, Mode) +} + +func (Device Device) SetComputeMode(Mode ComputeMode) Return { + return DeviceSetComputeMode(Device, Mode) +} + +// nvml.DeviceSetEccMode() +func DeviceSetEccMode(Device Device, Ecc EnableState) Return { + return nvmlDeviceSetEccMode(Device, Ecc) +} + +func (Device Device) SetEccMode(Ecc EnableState) Return { + return DeviceSetEccMode(Device, Ecc) +} + +// nvml.DeviceClearEccErrorCounts() +func DeviceClearEccErrorCounts(Device Device, CounterType EccCounterType) Return { + return nvmlDeviceClearEccErrorCounts(Device, CounterType) +} + +func (Device Device) ClearEccErrorCounts(CounterType EccCounterType) Return { + return DeviceClearEccErrorCounts(Device, CounterType) +} + +// nvml.DeviceSetDriverModel() +func DeviceSetDriverModel(Device Device, DriverModel DriverModel, Flags uint32) Return { + return nvmlDeviceSetDriverModel(Device, DriverModel, Flags) +} + +func (Device Device) SetDriverModel(DriverModel DriverModel, Flags uint32) Return { + return DeviceSetDriverModel(Device, DriverModel, Flags) +} + +// nvml.DeviceSetGpuLockedClocks() +func DeviceSetGpuLockedClocks(Device Device, MinGpuClockMHz uint32, MaxGpuClockMHz uint32) Return { + return nvmlDeviceSetGpuLockedClocks(Device, MinGpuClockMHz, MaxGpuClockMHz) +} + +func (Device Device) SetGpuLockedClocks(MinGpuClockMHz uint32, MaxGpuClockMHz uint32) Return { + return DeviceSetGpuLockedClocks(Device, MinGpuClockMHz, MaxGpuClockMHz) +} + +// nvml.DeviceResetGpuLockedClocks() +func DeviceResetGpuLockedClocks(Device Device) Return { + return nvmlDeviceResetGpuLockedClocks(Device) +} + +func (Device Device) ResetGpuLockedClocks() Return { + return DeviceResetGpuLockedClocks(Device) +} + +// nvmlDeviceSetMemoryLockedClocks() +func DeviceSetMemoryLockedClocks(Device Device, MinMemClockMHz uint32, MaxMemClockMHz uint32) Return { + return nvmlDeviceSetMemoryLockedClocks(Device, MinMemClockMHz, MaxMemClockMHz) +} + +func (Device Device) SetMemoryLockedClocks(NinMemClockMHz uint32, MaxMemClockMHz uint32) Return { + return DeviceSetMemoryLockedClocks(Device, NinMemClockMHz, MaxMemClockMHz) +} + +// nvmlDeviceResetMemoryLockedClocks() +func DeviceResetMemoryLockedClocks(Device Device) Return { + return nvmlDeviceResetMemoryLockedClocks(Device) +} + +func (Device Device) ResetMemoryLockedClocks() Return { + return DeviceResetMemoryLockedClocks(Device) +} + +// nvml.DeviceGetClkMonStatus() +func DeviceGetClkMonStatus(Device Device) (ClkMonStatus, Return) { + var Status ClkMonStatus + ret := nvmlDeviceGetClkMonStatus(Device, &Status) + return Status, ret +} + +func (Device Device) GetClkMonStatus() 
(ClkMonStatus, Return) { + return DeviceGetClkMonStatus(Device) +} + +// nvml.DeviceSetApplicationsClocks() +func DeviceSetApplicationsClocks(Device Device, MemClockMHz uint32, GraphicsClockMHz uint32) Return { + return nvmlDeviceSetApplicationsClocks(Device, MemClockMHz, GraphicsClockMHz) +} + +func (Device Device) SetApplicationsClocks(MemClockMHz uint32, GraphicsClockMHz uint32) Return { + return DeviceSetApplicationsClocks(Device, MemClockMHz, GraphicsClockMHz) +} + +// nvml.DeviceSetPowerManagementLimit() +func DeviceSetPowerManagementLimit(Device Device, Limit uint32) Return { + return nvmlDeviceSetPowerManagementLimit(Device, Limit) +} + +func (Device Device) SetPowerManagementLimit(Limit uint32) Return { + return DeviceSetPowerManagementLimit(Device, Limit) +} + +// nvml.DeviceSetGpuOperationMode() +func DeviceSetGpuOperationMode(Device Device, Mode GpuOperationMode) Return { + return nvmlDeviceSetGpuOperationMode(Device, Mode) +} + +func (Device Device) SetGpuOperationMode(Mode GpuOperationMode) Return { + return DeviceSetGpuOperationMode(Device, Mode) +} + +// nvml.DeviceSetAPIRestriction() +func DeviceSetAPIRestriction(Device Device, ApiType RestrictedAPI, IsRestricted EnableState) Return { + return nvmlDeviceSetAPIRestriction(Device, ApiType, IsRestricted) +} + +func (Device Device) SetAPIRestriction(ApiType RestrictedAPI, IsRestricted EnableState) Return { + return DeviceSetAPIRestriction(Device, ApiType, IsRestricted) +} + +// nvml.DeviceSetAccountingMode() +func DeviceSetAccountingMode(Device Device, Mode EnableState) Return { + return nvmlDeviceSetAccountingMode(Device, Mode) +} + +func (Device Device) SetAccountingMode(Mode EnableState) Return { + return DeviceSetAccountingMode(Device, Mode) +} + +// nvml.DeviceClearAccountingPids() +func DeviceClearAccountingPids(Device Device) Return { + return nvmlDeviceClearAccountingPids(Device) +} + +func (Device Device) ClearAccountingPids() Return { + return DeviceClearAccountingPids(Device) +} + +// nvml.DeviceGetNvLinkState() +func DeviceGetNvLinkState(Device Device, Link int) (EnableState, Return) { + var IsActive EnableState + ret := nvmlDeviceGetNvLinkState(Device, uint32(Link), &IsActive) + return IsActive, ret +} + +func (Device Device) GetNvLinkState(Link int) (EnableState, Return) { + return DeviceGetNvLinkState(Device, Link) +} + +// nvml.DeviceGetNvLinkVersion() +func DeviceGetNvLinkVersion(Device Device, Link int) (uint32, Return) { + var Version uint32 + ret := nvmlDeviceGetNvLinkVersion(Device, uint32(Link), &Version) + return Version, ret +} + +func (Device Device) GetNvLinkVersion(Link int) (uint32, Return) { + return DeviceGetNvLinkVersion(Device, Link) +} + +// nvml.DeviceGetNvLinkCapability() +func DeviceGetNvLinkCapability(Device Device, Link int, Capability NvLinkCapability) (uint32, Return) { + var CapResult uint32 + ret := nvmlDeviceGetNvLinkCapability(Device, uint32(Link), Capability, &CapResult) + return CapResult, ret +} + +func (Device Device) GetNvLinkCapability(Link int, Capability NvLinkCapability) (uint32, Return) { + return DeviceGetNvLinkCapability(Device, Link, Capability) +} + +// nvml.DeviceGetNvLinkRemotePciInfo() +func DeviceGetNvLinkRemotePciInfo(Device Device, Link int) (PciInfo, Return) { + var Pci PciInfo + ret := nvmlDeviceGetNvLinkRemotePciInfo(Device, uint32(Link), &Pci) + return Pci, ret +} + +func (Device Device) GetNvLinkRemotePciInfo(Link int) (PciInfo, Return) { + return DeviceGetNvLinkRemotePciInfo(Device, Link) +} + +// nvml.DeviceGetNvLinkErrorCounter() +func 
DeviceGetNvLinkErrorCounter(Device Device, Link int, Counter NvLinkErrorCounter) (uint64, Return) { + var CounterValue uint64 + ret := nvmlDeviceGetNvLinkErrorCounter(Device, uint32(Link), Counter, &CounterValue) + return CounterValue, ret +} + +func (Device Device) GetNvLinkErrorCounter(Link int, Counter NvLinkErrorCounter) (uint64, Return) { + return DeviceGetNvLinkErrorCounter(Device, Link, Counter) +} + +// nvml.DeviceResetNvLinkErrorCounters() +func DeviceResetNvLinkErrorCounters(Device Device, Link int) Return { + return nvmlDeviceResetNvLinkErrorCounters(Device, uint32(Link)) +} + +func (Device Device) ResetNvLinkErrorCounters(Link int) Return { + return DeviceResetNvLinkErrorCounters(Device, Link) +} + +// nvml.DeviceSetNvLinkUtilizationControl() +func DeviceSetNvLinkUtilizationControl(Device Device, Link int, Counter int, Control *NvLinkUtilizationControl, Reset bool) Return { + reset := uint32(0) + if Reset { + reset = 1 + } + return nvmlDeviceSetNvLinkUtilizationControl(Device, uint32(Link), uint32(Counter), Control, reset) +} + +func (Device Device) SetNvLinkUtilizationControl(Link int, Counter int, Control *NvLinkUtilizationControl, Reset bool) Return { + return DeviceSetNvLinkUtilizationControl(Device, Link, Counter, Control, Reset) +} + +// nvml.DeviceGetNvLinkUtilizationControl() +func DeviceGetNvLinkUtilizationControl(Device Device, Link int, Counter int) (NvLinkUtilizationControl, Return) { + var Control NvLinkUtilizationControl + ret := nvmlDeviceGetNvLinkUtilizationControl(Device, uint32(Link), uint32(Counter), &Control) + return Control, ret +} + +func (Device Device) GetNvLinkUtilizationControl(Link int, Counter int) (NvLinkUtilizationControl, Return) { + return DeviceGetNvLinkUtilizationControl(Device, Link, Counter) +} + +// nvml.DeviceGetNvLinkUtilizationCounter() +func DeviceGetNvLinkUtilizationCounter(Device Device, Link int, Counter int) (uint64, uint64, Return) { + var Rxcounter, Txcounter uint64 + ret := nvmlDeviceGetNvLinkUtilizationCounter(Device, uint32(Link), uint32(Counter), &Rxcounter, &Txcounter) + return Rxcounter, Txcounter, ret +} + +func (Device Device) GetNvLinkUtilizationCounter(Link int, Counter int) (uint64, uint64, Return) { + return DeviceGetNvLinkUtilizationCounter(Device, Link, Counter) +} + +// nvml.DeviceFreezeNvLinkUtilizationCounter() +func DeviceFreezeNvLinkUtilizationCounter(Device Device, Link int, Counter int, Freeze EnableState) Return { + return nvmlDeviceFreezeNvLinkUtilizationCounter(Device, uint32(Link), uint32(Counter), Freeze) +} + +func (Device Device) FreezeNvLinkUtilizationCounter(Link int, Counter int, Freeze EnableState) Return { + return DeviceFreezeNvLinkUtilizationCounter(Device, Link, Counter, Freeze) +} + +// nvml.DeviceResetNvLinkUtilizationCounter() +func DeviceResetNvLinkUtilizationCounter(Device Device, Link int, Counter int) Return { + return nvmlDeviceResetNvLinkUtilizationCounter(Device, uint32(Link), uint32(Counter)) +} + +func (Device Device) ResetNvLinkUtilizationCounter(Link int, Counter int) Return { + return DeviceResetNvLinkUtilizationCounter(Device, Link, Counter) +} + +// nvml.DeviceGetNvLinkRemoteDeviceType() +func DeviceGetNvLinkRemoteDeviceType(Device Device, Link int) (IntNvLinkDeviceType, Return) { + var NvLinkDeviceType IntNvLinkDeviceType + ret := nvmlDeviceGetNvLinkRemoteDeviceType(Device, uint32(Link), &NvLinkDeviceType) + return NvLinkDeviceType, ret +} + +func (Device Device) GetNvLinkRemoteDeviceType(Link int) (IntNvLinkDeviceType, Return) { + return DeviceGetNvLinkRemoteDeviceType(Device, 
Link) +} + +// nvml.DeviceRegisterEvents() +func DeviceRegisterEvents(Device Device, EventTypes uint64, Set EventSet) Return { + return nvmlDeviceRegisterEvents(Device, EventTypes, Set) +} + +func (Device Device) RegisterEvents(EventTypes uint64, Set EventSet) Return { + return DeviceRegisterEvents(Device, EventTypes, Set) +} + +// nvmlDeviceGetSupportedEventTypes() +func DeviceGetSupportedEventTypes(Device Device) (uint64, Return) { + var EventTypes uint64 + ret := nvmlDeviceGetSupportedEventTypes(Device, &EventTypes) + return EventTypes, ret +} + +func (Device Device) GetSupportedEventTypes() (uint64, Return) { + return DeviceGetSupportedEventTypes(Device) +} + +// nvml.DeviceModifyDrainState() +func DeviceModifyDrainState(PciInfo *PciInfo, NewState EnableState) Return { + return nvmlDeviceModifyDrainState(PciInfo, NewState) +} + +// nvml.DeviceQueryDrainState() +func DeviceQueryDrainState(PciInfo *PciInfo) (EnableState, Return) { + var CurrentState EnableState + ret := nvmlDeviceQueryDrainState(PciInfo, &CurrentState) + return CurrentState, ret +} + +// nvml.DeviceRemoveGpu() +func DeviceRemoveGpu(PciInfo *PciInfo) Return { + return nvmlDeviceRemoveGpu(PciInfo) +} + +// nvml.DeviceRemoveGpu_v2() +func DeviceRemoveGpu_v2(PciInfo *PciInfo, GpuState DetachGpuState, LinkState PcieLinkState) Return { + return nvmlDeviceRemoveGpu_v2(PciInfo, GpuState, LinkState) +} + +// nvml.DeviceDiscoverGpus() +func DeviceDiscoverGpus() (PciInfo, Return) { + var PciInfo PciInfo + ret := nvmlDeviceDiscoverGpus(&PciInfo) + return PciInfo, ret +} + +// nvml.DeviceGetFieldValues() +func DeviceGetFieldValues(Device Device, Values []FieldValue) Return { + ValuesCount := len(Values) + return nvmlDeviceGetFieldValues(Device, int32(ValuesCount), &Values[0]) +} + +func (Device Device) GetFieldValues(Values []FieldValue) Return { + return DeviceGetFieldValues(Device, Values) +} + +// nvml.DeviceGetVirtualizationMode() +func DeviceGetVirtualizationMode(Device Device) (GpuVirtualizationMode, Return) { + var PVirtualMode GpuVirtualizationMode + ret := nvmlDeviceGetVirtualizationMode(Device, &PVirtualMode) + return PVirtualMode, ret +} + +func (Device Device) GetVirtualizationMode() (GpuVirtualizationMode, Return) { + return DeviceGetVirtualizationMode(Device) +} + +// nvml.DeviceGetHostVgpuMode() +func DeviceGetHostVgpuMode(Device Device) (HostVgpuMode, Return) { + var PHostVgpuMode HostVgpuMode + ret := nvmlDeviceGetHostVgpuMode(Device, &PHostVgpuMode) + return PHostVgpuMode, ret +} + +func (Device Device) GetHostVgpuMode() (HostVgpuMode, Return) { + return DeviceGetHostVgpuMode(Device) +} + +// nvml.DeviceSetVirtualizationMode() +func DeviceSetVirtualizationMode(Device Device, VirtualMode GpuVirtualizationMode) Return { + return nvmlDeviceSetVirtualizationMode(Device, VirtualMode) +} + +func (Device Device) SetVirtualizationMode(VirtualMode GpuVirtualizationMode) Return { + return DeviceSetVirtualizationMode(Device, VirtualMode) +} + +// nvml.DeviceGetGridLicensableFeatures() +func DeviceGetGridLicensableFeatures(Device Device) (GridLicensableFeatures, Return) { + var PGridLicensableFeatures GridLicensableFeatures + ret := nvmlDeviceGetGridLicensableFeatures(Device, &PGridLicensableFeatures) + return PGridLicensableFeatures, ret +} + +func (Device Device) GetGridLicensableFeatures() (GridLicensableFeatures, Return) { + return DeviceGetGridLicensableFeatures(Device) +} + +// nvml.DeviceGetProcessUtilization() +func DeviceGetProcessUtilization(Device Device, LastSeenTimeStamp uint64) ([]ProcessUtilizationSample, Return) 
{ + var ProcessSamplesCount uint32 + ret := nvmlDeviceGetProcessUtilization(Device, nil, &ProcessSamplesCount, LastSeenTimeStamp) + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + if ProcessSamplesCount == 0 { + return []ProcessUtilizationSample{}, ret + } + Utilization := make([]ProcessUtilizationSample, ProcessSamplesCount) + ret = nvmlDeviceGetProcessUtilization(Device, &Utilization[0], &ProcessSamplesCount, LastSeenTimeStamp) + return Utilization[:ProcessSamplesCount], ret +} + +func (Device Device) GetProcessUtilization(LastSeenTimeStamp uint64) ([]ProcessUtilizationSample, Return) { + return DeviceGetProcessUtilization(Device, LastSeenTimeStamp) +} + +// nvml.DeviceGetSupportedVgpus() +func DeviceGetSupportedVgpus(Device Device) ([]VgpuTypeId, Return) { + var VgpuCount uint32 = 1 // Will be reduced upon returning + for { + VgpuTypeIds := make([]VgpuTypeId, VgpuCount) + ret := nvmlDeviceGetSupportedVgpus(Device, &VgpuCount, &VgpuTypeIds[0]) + if ret == SUCCESS { + return VgpuTypeIds[:VgpuCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + VgpuCount *= 2 + } +} + +func (Device Device) GetSupportedVgpus() ([]VgpuTypeId, Return) { + return DeviceGetSupportedVgpus(Device) +} + +// nvml.DeviceGetCreatableVgpus() +func DeviceGetCreatableVgpus(Device Device) ([]VgpuTypeId, Return) { + var VgpuCount uint32 = 1 // Will be reduced upon returning + for { + VgpuTypeIds := make([]VgpuTypeId, VgpuCount) + ret := nvmlDeviceGetCreatableVgpus(Device, &VgpuCount, &VgpuTypeIds[0]) + if ret == SUCCESS { + return VgpuTypeIds[:VgpuCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + VgpuCount *= 2 + } +} + +func (Device Device) GetCreatableVgpus() ([]VgpuTypeId, Return) { + return DeviceGetCreatableVgpus(Device) +} + +// nvml.DeviceGetActiveVgpus() +func DeviceGetActiveVgpus(Device Device) ([]VgpuInstance, Return) { + var VgpuCount uint32 = 1 // Will be reduced upon returning + for { + VgpuInstances := make([]VgpuInstance, VgpuCount) + ret := nvmlDeviceGetActiveVgpus(Device, &VgpuCount, &VgpuInstances[0]) + if ret == SUCCESS { + return VgpuInstances[:VgpuCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + VgpuCount *= 2 + } +} + +func (Device Device) GetActiveVgpus() ([]VgpuInstance, Return) { + return DeviceGetActiveVgpus(Device) +} + +// nvml.DeviceGetVgpuMetadata() +func DeviceGetVgpuMetadata(Device Device) (VgpuPgpuMetadata, Return) { + var VgpuPgpuMetadata VgpuPgpuMetadata + OpaqueDataSize := unsafe.Sizeof(VgpuPgpuMetadata.nvmlVgpuPgpuMetadata.OpaqueData) + VgpuPgpuMetadataSize := unsafe.Sizeof(VgpuPgpuMetadata.nvmlVgpuPgpuMetadata) - OpaqueDataSize + for { + BufferSize := uint32(VgpuPgpuMetadataSize + OpaqueDataSize) + Buffer := make([]byte, BufferSize) + nvmlVgpuPgpuMetadataPtr := (*nvmlVgpuPgpuMetadata)(unsafe.Pointer(&Buffer[0])) + ret := nvmlDeviceGetVgpuMetadata(Device, nvmlVgpuPgpuMetadataPtr, &BufferSize) + if ret == SUCCESS { + VgpuPgpuMetadata.nvmlVgpuPgpuMetadata = *nvmlVgpuPgpuMetadataPtr + VgpuPgpuMetadata.OpaqueData = Buffer[VgpuPgpuMetadataSize:BufferSize] + return VgpuPgpuMetadata, ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return VgpuPgpuMetadata, ret + } + OpaqueDataSize = 2 * OpaqueDataSize + } +} + +func (Device Device) GetVgpuMetadata() (VgpuPgpuMetadata, Return) { + return DeviceGetVgpuMetadata(Device) +} + +// nvml.DeviceGetPgpuMetadataString() +func DeviceGetPgpuMetadataString(Device Device) (string, Return) { + var BufferSize uint32 = 1 // Will be reduced upon returning + for { + 
PgpuMetadata := make([]byte, BufferSize) + ret := nvmlDeviceGetPgpuMetadataString(Device, &PgpuMetadata[0], &BufferSize) + if ret == SUCCESS { + return string(PgpuMetadata[:clen(PgpuMetadata)]), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return "", ret + } + BufferSize *= 2 + } +} + +func (Device Device) GetPgpuMetadataString() (string, Return) { + return DeviceGetPgpuMetadataString(Device) +} + +// nvml.DeviceGetVgpuUtilization() +func DeviceGetVgpuUtilization(Device Device, LastSeenTimeStamp uint64) (ValueType, []VgpuInstanceUtilizationSample, Return) { + var SampleValType ValueType + var VgpuInstanceSamplesCount uint32 = 1 // Will be reduced upon returning + for { + UtilizationSamples := make([]VgpuInstanceUtilizationSample, VgpuInstanceSamplesCount) + ret := nvmlDeviceGetVgpuUtilization(Device, LastSeenTimeStamp, &SampleValType, &VgpuInstanceSamplesCount, &UtilizationSamples[0]) + if ret == SUCCESS { + return SampleValType, UtilizationSamples[:VgpuInstanceSamplesCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return SampleValType, nil, ret + } + VgpuInstanceSamplesCount *= 2 + } +} + +func (Device Device) GetVgpuUtilization(LastSeenTimeStamp uint64) (ValueType, []VgpuInstanceUtilizationSample, Return) { + return DeviceGetVgpuUtilization(Device, LastSeenTimeStamp) +} + +// nvml.DeviceGetAttributes() +func DeviceGetAttributes(Device Device) (DeviceAttributes, Return) { + var Attributes DeviceAttributes + ret := nvmlDeviceGetAttributes(Device, &Attributes) + return Attributes, ret +} + +func (Device Device) GetAttributes() (DeviceAttributes, Return) { + return DeviceGetAttributes(Device) +} + +// nvml.DeviceGetRemappedRows() +func DeviceGetRemappedRows(Device Device) (int, int, bool, bool, Return) { + var CorrRows, UncRows, IsPending, FailureOccured uint32 + ret := nvmlDeviceGetRemappedRows(Device, &CorrRows, &UncRows, &IsPending, &FailureOccured) + return int(CorrRows), int(UncRows), (IsPending != 0), (FailureOccured != 0), ret +} + +func (Device Device) GetRemappedRows() (int, int, bool, bool, Return) { + return DeviceGetRemappedRows(Device) +} + +// nvml.DeviceGetRowRemapperHistogram() +func DeviceGetRowRemapperHistogram(Device Device) (RowRemapperHistogramValues, Return) { + var Values RowRemapperHistogramValues + ret := nvmlDeviceGetRowRemapperHistogram(Device, &Values) + return Values, ret +} + +func (Device Device) GetRowRemapperHistogram() (RowRemapperHistogramValues, Return) { + return DeviceGetRowRemapperHistogram(Device) +} + +// nvml.DeviceGetArchitecture() +func DeviceGetArchitecture(Device Device) (DeviceArchitecture, Return) { + var Arch DeviceArchitecture + ret := nvmlDeviceGetArchitecture(Device, &Arch) + return Arch, ret +} + +func (Device Device) GetArchitecture() (DeviceArchitecture, Return) { + return DeviceGetArchitecture(Device) +} + +// nvml.DeviceGetVgpuProcessUtilization() +func DeviceGetVgpuProcessUtilization(Device Device, LastSeenTimeStamp uint64) ([]VgpuProcessUtilizationSample, Return) { + var VgpuProcessSamplesCount uint32 = 1 // Will be reduced upon returning + for { + UtilizationSamples := make([]VgpuProcessUtilizationSample, VgpuProcessSamplesCount) + ret := nvmlDeviceGetVgpuProcessUtilization(Device, LastSeenTimeStamp, &VgpuProcessSamplesCount, &UtilizationSamples[0]) + if ret == SUCCESS { + return UtilizationSamples[:VgpuProcessSamplesCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + VgpuProcessSamplesCount *= 2 + } +} + +func (Device Device) GetVgpuProcessUtilization(LastSeenTimeStamp uint64) 
([]VgpuProcessUtilizationSample, Return) { + return DeviceGetVgpuProcessUtilization(Device, LastSeenTimeStamp) +} + +// nvml.GetExcludedDeviceCount() +func GetExcludedDeviceCount() (int, Return) { + var DeviceCount uint32 + ret := nvmlGetExcludedDeviceCount(&DeviceCount) + return int(DeviceCount), ret +} + +// nvml.GetExcludedDeviceInfoByIndex() +func GetExcludedDeviceInfoByIndex(Index int) (ExcludedDeviceInfo, Return) { + var Info ExcludedDeviceInfo + ret := nvmlGetExcludedDeviceInfoByIndex(uint32(Index), &Info) + return Info, ret +} + +// nvml.DeviceSetMigMode() +func DeviceSetMigMode(Device Device, Mode int) (Return, Return) { + var ActivationStatus Return + ret := nvmlDeviceSetMigMode(Device, uint32(Mode), &ActivationStatus) + return ActivationStatus, ret +} + +func (Device Device) SetMigMode(Mode int) (Return, Return) { + return DeviceSetMigMode(Device, Mode) +} + +// nvml.DeviceGetMigMode() +func DeviceGetMigMode(Device Device) (int, int, Return) { + var CurrentMode, PendingMode uint32 + ret := nvmlDeviceGetMigMode(Device, &CurrentMode, &PendingMode) + return int(CurrentMode), int(PendingMode), ret +} + +func (Device Device) GetMigMode() (int, int, Return) { + return DeviceGetMigMode(Device) +} + +// nvml.DeviceGetGpuInstanceProfileInfo() +func DeviceGetGpuInstanceProfileInfo(Device Device, Profile int) (GpuInstanceProfileInfo, Return) { + var Info GpuInstanceProfileInfo + ret := nvmlDeviceGetGpuInstanceProfileInfo(Device, uint32(Profile), &Info) + return Info, ret +} + +func (Device Device) GetGpuInstanceProfileInfo(Profile int) (GpuInstanceProfileInfo, Return) { + return DeviceGetGpuInstanceProfileInfo(Device, Profile) +} + +// nvml.DeviceGetGpuInstanceProfileInfoV() +type GpuInstanceProfileInfoV struct { + device Device + profile int +} + +func (InfoV GpuInstanceProfileInfoV) V1() (GpuInstanceProfileInfo, Return) { + return DeviceGetGpuInstanceProfileInfo(InfoV.device, InfoV.profile) +} + +func (InfoV GpuInstanceProfileInfoV) V2() (GpuInstanceProfileInfo_v2, Return) { + var Info GpuInstanceProfileInfo_v2 + Info.Version = STRUCT_VERSION(Info, 2) + ret := nvmlDeviceGetGpuInstanceProfileInfoV(InfoV.device, uint32(InfoV.profile), &Info) + return Info, ret +} + +func DeviceGetGpuInstanceProfileInfoV(Device Device, Profile int) GpuInstanceProfileInfoV { + return GpuInstanceProfileInfoV{Device, Profile} +} + +func (Device Device) GetGpuInstanceProfileInfoV(Profile int) GpuInstanceProfileInfoV { + return DeviceGetGpuInstanceProfileInfoV(Device, Profile) +} + +// nvml.DeviceGetGpuInstancePossiblePlacements() +func DeviceGetGpuInstancePossiblePlacements(Device Device, Info *GpuInstanceProfileInfo) ([]GpuInstancePlacement, Return) { + if Info == nil { + return nil, ERROR_INVALID_ARGUMENT + } + var Count uint32 + ret := nvmlDeviceGetGpuInstancePossiblePlacements(Device, Info.Id, nil, &Count) + if ret != SUCCESS { + return nil, ret + } + if Count == 0 { + return []GpuInstancePlacement{}, ret + } + Placements := make([]GpuInstancePlacement, Count) + ret = nvmlDeviceGetGpuInstancePossiblePlacements(Device, Info.Id, &Placements[0], &Count) + return Placements[:Count], ret +} + +func (Device Device) GetGpuInstancePossiblePlacements(Info *GpuInstanceProfileInfo) ([]GpuInstancePlacement, Return) { + return DeviceGetGpuInstancePossiblePlacements(Device, Info) +} + +// nvml.DeviceGetGpuInstanceRemainingCapacity() +func DeviceGetGpuInstanceRemainingCapacity(Device Device, Info *GpuInstanceProfileInfo) (int, Return) { + if Info == nil { + return 0, ERROR_INVALID_ARGUMENT + } + var Count uint32 + ret := 
nvmlDeviceGetGpuInstanceRemainingCapacity(Device, Info.Id, &Count) + return int(Count), ret +} + +func (Device Device) GetGpuInstanceRemainingCapacity(Info *GpuInstanceProfileInfo) (int, Return) { + return DeviceGetGpuInstanceRemainingCapacity(Device, Info) +} + +// nvml.DeviceCreateGpuInstance() +func DeviceCreateGpuInstance(Device Device, Info *GpuInstanceProfileInfo) (GpuInstance, Return) { + if Info == nil { + return GpuInstance{}, ERROR_INVALID_ARGUMENT + } + var GpuInstance GpuInstance + ret := nvmlDeviceCreateGpuInstance(Device, Info.Id, &GpuInstance) + return GpuInstance, ret +} + +func (Device Device) CreateGpuInstance(Info *GpuInstanceProfileInfo) (GpuInstance, Return) { + return DeviceCreateGpuInstance(Device, Info) +} + +// nvml.DeviceCreateGpuInstanceWithPlacement() +func DeviceCreateGpuInstanceWithPlacement(Device Device, Info *GpuInstanceProfileInfo, Placement *GpuInstancePlacement) (GpuInstance, Return) { + if Info == nil { + return GpuInstance{}, ERROR_INVALID_ARGUMENT + } + var GpuInstance GpuInstance + ret := nvmlDeviceCreateGpuInstanceWithPlacement(Device, Info.Id, Placement, &GpuInstance) + return GpuInstance, ret +} + +func (Device Device) CreateGpuInstanceWithPlacement(Info *GpuInstanceProfileInfo, Placement *GpuInstancePlacement) (GpuInstance, Return) { + return DeviceCreateGpuInstanceWithPlacement(Device, Info, Placement) +} + +// nvml.GpuInstanceDestroy() +func GpuInstanceDestroy(GpuInstance GpuInstance) Return { + return nvmlGpuInstanceDestroy(GpuInstance) +} + +func (GpuInstance GpuInstance) Destroy() Return { + return GpuInstanceDestroy(GpuInstance) +} + +// nvml.DeviceGetGpuInstances() +func DeviceGetGpuInstances(Device Device, Info *GpuInstanceProfileInfo) ([]GpuInstance, Return) { + if Info == nil { + return nil, ERROR_INVALID_ARGUMENT + } + var Count uint32 = Info.InstanceCount + GpuInstances := make([]GpuInstance, Count) + ret := nvmlDeviceGetGpuInstances(Device, Info.Id, &GpuInstances[0], &Count) + return GpuInstances[:Count], ret +} + +func (Device Device) GetGpuInstances(Info *GpuInstanceProfileInfo) ([]GpuInstance, Return) { + return DeviceGetGpuInstances(Device, Info) +} + +// nvml.DeviceGetGpuInstanceById() +func DeviceGetGpuInstanceById(Device Device, Id int) (GpuInstance, Return) { + var GpuInstance GpuInstance + ret := nvmlDeviceGetGpuInstanceById(Device, uint32(Id), &GpuInstance) + return GpuInstance, ret +} + +func (Device Device) GetGpuInstanceById(Id int) (GpuInstance, Return) { + return DeviceGetGpuInstanceById(Device, Id) +} + +// nvml.GpuInstanceGetInfo() +func GpuInstanceGetInfo(GpuInstance GpuInstance) (GpuInstanceInfo, Return) { + var Info GpuInstanceInfo + ret := nvmlGpuInstanceGetInfo(GpuInstance, &Info) + return Info, ret +} + +func (GpuInstance GpuInstance) GetInfo() (GpuInstanceInfo, Return) { + return GpuInstanceGetInfo(GpuInstance) +} + +// nvml.GpuInstanceGetComputeInstanceProfileInfo() +func GpuInstanceGetComputeInstanceProfileInfo(GpuInstance GpuInstance, Profile int, EngProfile int) (ComputeInstanceProfileInfo, Return) { + var Info ComputeInstanceProfileInfo + ret := nvmlGpuInstanceGetComputeInstanceProfileInfo(GpuInstance, uint32(Profile), uint32(EngProfile), &Info) + return Info, ret +} + +func (GpuInstance GpuInstance) GetComputeInstanceProfileInfo(Profile int, EngProfile int) (ComputeInstanceProfileInfo, Return) { + return GpuInstanceGetComputeInstanceProfileInfo(GpuInstance, Profile, EngProfile) +} + +// nvml.GpuInstanceGetComputeInstanceProfileInfoV() +type ComputeInstanceProfileInfoV struct { + gpuInstance GpuInstance + 
profile int + engProfile int +} + +func (InfoV ComputeInstanceProfileInfoV) V1() (ComputeInstanceProfileInfo, Return) { + return GpuInstanceGetComputeInstanceProfileInfo(InfoV.gpuInstance, InfoV.profile, InfoV.engProfile) +} + +func (InfoV ComputeInstanceProfileInfoV) V2() (ComputeInstanceProfileInfo_v2, Return) { + var Info ComputeInstanceProfileInfo_v2 + Info.Version = STRUCT_VERSION(Info, 2) + ret := nvmlGpuInstanceGetComputeInstanceProfileInfoV(InfoV.gpuInstance, uint32(InfoV.profile), uint32(InfoV.engProfile), &Info) + return Info, ret +} + +func GpuInstanceGetComputeInstanceProfileInfoV(GpuInstance GpuInstance, Profile int, EngProfile int) ComputeInstanceProfileInfoV { + return ComputeInstanceProfileInfoV{GpuInstance, Profile, EngProfile} +} + +func (GpuInstance GpuInstance) GetComputeInstanceProfileInfoV(Profile int, EngProfile int) ComputeInstanceProfileInfoV { + return GpuInstanceGetComputeInstanceProfileInfoV(GpuInstance, Profile, EngProfile) +} + +// nvml.GpuInstanceGetComputeInstanceRemainingCapacity() +func GpuInstanceGetComputeInstanceRemainingCapacity(GpuInstance GpuInstance, Info *ComputeInstanceProfileInfo) (int, Return) { + if Info == nil { + return 0, ERROR_INVALID_ARGUMENT + } + var Count uint32 + ret := nvmlGpuInstanceGetComputeInstanceRemainingCapacity(GpuInstance, Info.Id, &Count) + return int(Count), ret +} + +func (GpuInstance GpuInstance) GetComputeInstanceRemainingCapacity(Info *ComputeInstanceProfileInfo) (int, Return) { + return GpuInstanceGetComputeInstanceRemainingCapacity(GpuInstance, Info) +} + +// nvml.GpuInstanceCreateComputeInstance() +func GpuInstanceCreateComputeInstance(GpuInstance GpuInstance, Info *ComputeInstanceProfileInfo) (ComputeInstance, Return) { + if Info == nil { + return ComputeInstance{}, ERROR_INVALID_ARGUMENT + } + var ComputeInstance ComputeInstance + ret := nvmlGpuInstanceCreateComputeInstance(GpuInstance, Info.Id, &ComputeInstance) + return ComputeInstance, ret +} + +func (GpuInstance GpuInstance) CreateComputeInstance(Info *ComputeInstanceProfileInfo) (ComputeInstance, Return) { + return GpuInstanceCreateComputeInstance(GpuInstance, Info) +} + +// nvml.ComputeInstanceDestroy() +func ComputeInstanceDestroy(ComputeInstance ComputeInstance) Return { + return nvmlComputeInstanceDestroy(ComputeInstance) +} + +func (ComputeInstance ComputeInstance) Destroy() Return { + return ComputeInstanceDestroy(ComputeInstance) +} + +// nvml.GpuInstanceGetComputeInstances() +func GpuInstanceGetComputeInstances(GpuInstance GpuInstance, Info *ComputeInstanceProfileInfo) ([]ComputeInstance, Return) { + if Info == nil { + return nil, ERROR_INVALID_ARGUMENT + } + var Count uint32 = Info.InstanceCount + ComputeInstances := make([]ComputeInstance, Count) + ret := nvmlGpuInstanceGetComputeInstances(GpuInstance, Info.Id, &ComputeInstances[0], &Count) + return ComputeInstances[:Count], ret +} + +func (GpuInstance GpuInstance) GetComputeInstances(Info *ComputeInstanceProfileInfo) ([]ComputeInstance, Return) { + return GpuInstanceGetComputeInstances(GpuInstance, Info) +} + +// nvml.GpuInstanceGetComputeInstanceById() +func GpuInstanceGetComputeInstanceById(GpuInstance GpuInstance, Id int) (ComputeInstance, Return) { + var ComputeInstance ComputeInstance + ret := nvmlGpuInstanceGetComputeInstanceById(GpuInstance, uint32(Id), &ComputeInstance) + return ComputeInstance, ret +} + +func (GpuInstance GpuInstance) GetComputeInstanceById(Id int) (ComputeInstance, Return) { + return GpuInstanceGetComputeInstanceById(GpuInstance, Id) +} + +// nvml.ComputeInstanceGetInfo() 
+func ComputeInstanceGetInfo(ComputeInstance ComputeInstance) (ComputeInstanceInfo, Return) { + var Info ComputeInstanceInfo + ret := nvmlComputeInstanceGetInfo(ComputeInstance, &Info) + return Info, ret +} + +func (ComputeInstance ComputeInstance) GetInfo() (ComputeInstanceInfo, Return) { + return ComputeInstanceGetInfo(ComputeInstance) +} + +// nvml.DeviceIsMigDeviceHandle() +func DeviceIsMigDeviceHandle(Device Device) (bool, Return) { + var IsMigDevice uint32 + ret := nvmlDeviceIsMigDeviceHandle(Device, &IsMigDevice) + return (IsMigDevice != 0), ret +} + +func (Device Device) IsMigDeviceHandle() (bool, Return) { + return DeviceIsMigDeviceHandle(Device) +} + +// nvml DeviceGetGpuInstanceId() +func DeviceGetGpuInstanceId(Device Device) (int, Return) { + var Id uint32 + ret := nvmlDeviceGetGpuInstanceId(Device, &Id) + return int(Id), ret +} + +func (Device Device) GetGpuInstanceId() (int, Return) { + return DeviceGetGpuInstanceId(Device) +} + +// nvml.DeviceGetComputeInstanceId() +func DeviceGetComputeInstanceId(Device Device) (int, Return) { + var Id uint32 + ret := nvmlDeviceGetComputeInstanceId(Device, &Id) + return int(Id), ret +} + +func (Device Device) GetComputeInstanceId() (int, Return) { + return DeviceGetComputeInstanceId(Device) +} + +// nvml.DeviceGetMaxMigDeviceCount() +func DeviceGetMaxMigDeviceCount(Device Device) (int, Return) { + var Count uint32 + ret := nvmlDeviceGetMaxMigDeviceCount(Device, &Count) + return int(Count), ret +} + +func (Device Device) GetMaxMigDeviceCount() (int, Return) { + return DeviceGetMaxMigDeviceCount(Device) +} + +// nvml.DeviceGetMigDeviceHandleByIndex() +func DeviceGetMigDeviceHandleByIndex(device Device, Index int) (Device, Return) { + var MigDevice Device + ret := nvmlDeviceGetMigDeviceHandleByIndex(device, uint32(Index), &MigDevice) + return MigDevice, ret +} + +func (Device Device) GetMigDeviceHandleByIndex(Index int) (Device, Return) { + return DeviceGetMigDeviceHandleByIndex(Device, Index) +} + +// nvml.DeviceGetDeviceHandleFromMigDeviceHandle() +func DeviceGetDeviceHandleFromMigDeviceHandle(MigDevice Device) (Device, Return) { + var Device Device + ret := nvmlDeviceGetDeviceHandleFromMigDeviceHandle(MigDevice, &Device) + return Device, ret +} + +func (MigDevice Device) GetDeviceHandleFromMigDeviceHandle() (Device, Return) { + return DeviceGetDeviceHandleFromMigDeviceHandle(MigDevice) +} + +// nvml.DeviceGetBusType() +func DeviceGetBusType(Device Device) (BusType, Return) { + var Type BusType + ret := nvmlDeviceGetBusType(Device, &Type) + return Type, ret +} + +func (Device Device) GetBusType() (BusType, Return) { + return DeviceGetBusType(Device) +} + +// nvml.DeviceSetDefaultFanSpeed_v2() +func DeviceSetDefaultFanSpeed_v2(Device Device, Fan int) Return { + return nvmlDeviceSetDefaultFanSpeed_v2(Device, uint32(Fan)) +} + +func (Device Device) SetDefaultFanSpeed_v2(Fan int) Return { + return DeviceSetDefaultFanSpeed_v2(Device, Fan) +} + +// nvml.DeviceGetMinMaxFanSpeed() +func DeviceGetMinMaxFanSpeed(Device Device) (int, int, Return) { + var MinSpeed, MaxSpeed uint32 + ret := nvmlDeviceGetMinMaxFanSpeed(Device, &MinSpeed, &MaxSpeed) + return int(MinSpeed), int(MaxSpeed), ret +} + +func (Device Device) GetMinMaxFanSpeed() (int, int, Return) { + return DeviceGetMinMaxFanSpeed(Device) +} + +// nvml.DeviceGetThermalSettings() +func DeviceGetThermalSettings(Device Device, SensorIndex uint32) (GpuThermalSettings, Return) { + var PThermalSettings GpuThermalSettings + ret := nvmlDeviceGetThermalSettings(Device, SensorIndex, &PThermalSettings) + 
return PThermalSettings, ret +} + +func (Device Device) GetThermalSettings(SensorIndex uint32) (GpuThermalSettings, Return) { + return DeviceGetThermalSettings(Device, SensorIndex) +} + +// nvml.DeviceGetDefaultEccMode() +func DeviceGetDefaultEccMode(Device Device) (EnableState, Return) { + var DefaultMode EnableState + ret := nvmlDeviceGetDefaultEccMode(Device, &DefaultMode) + return DefaultMode, ret +} + +func (Device Device) GetDefaultEccMode() (EnableState, Return) { + return DeviceGetDefaultEccMode(Device) +} + +// nvml.DeviceGetPcieSpeed() +func DeviceGetPcieSpeed(Device Device) (int, Return) { + var PcieSpeed uint32 + ret := nvmlDeviceGetPcieSpeed(Device, &PcieSpeed) + return int(PcieSpeed), ret +} + +func (Device Device) GetPcieSpeed() (int, Return) { + return DeviceGetPcieSpeed(Device) +} + +// nvml.DeviceGetGspFirmwareVersion() +func DeviceGetGspFirmwareVersion(Device Device) (string, Return) { + Version := make([]byte, GSP_FIRMWARE_VERSION_BUF_SIZE) + ret := nvmlDeviceGetGspFirmwareVersion(Device, &Version[0]) + return string(Version[:clen(Version)]), ret +} + +func (Device Device) GetGspFirmwareVersion() (string, Return) { + return DeviceGetGspFirmwareVersion(Device) +} + +// nvml.DeviceGetGspFirmwareMode() +func DeviceGetGspFirmwareMode(Device Device) (bool, bool, Return) { + var IsEnabled, DefaultMode uint32 + ret := nvmlDeviceGetGspFirmwareMode(Device, &IsEnabled, &DefaultMode) + return (IsEnabled != 0), (DefaultMode != 0), ret +} + +func (Device Device) GetGspFirmwareMode() (bool, bool, Return) { + return DeviceGetGspFirmwareMode(Device) +} + +// nvml.DeviceGetDynamicPstatesInfo() +func DeviceGetDynamicPstatesInfo(Device Device) (GpuDynamicPstatesInfo, Return) { + var PDynamicPstatesInfo GpuDynamicPstatesInfo + ret := nvmlDeviceGetDynamicPstatesInfo(Device, &PDynamicPstatesInfo) + return PDynamicPstatesInfo, ret +} + +func (Device Device) GetDynamicPstatesInfo() (GpuDynamicPstatesInfo, Return) { + return DeviceGetDynamicPstatesInfo(Device) +} + +// nvml.DeviceSetFanSpeed_v2() +func DeviceSetFanSpeed_v2(Device Device, Fan int, Speed int) Return { + return nvmlDeviceSetFanSpeed_v2(Device, uint32(Fan), uint32(Speed)) +} + +func (Device Device) SetFanSpeed_v2(Fan int, Speed int) Return { + return DeviceSetFanSpeed_v2(Device, Fan, Speed) +} + +// nvml.DeviceGetGpcClkVfOffset() +func DeviceGetGpcClkVfOffset(Device Device) (int, Return) { + var Offset int32 + ret := nvmlDeviceGetGpcClkVfOffset(Device, &Offset) + return int(Offset), ret +} + +func (Device Device) GetGpcClkVfOffset() (int, Return) { + return DeviceGetGpcClkVfOffset(Device) +} + +// nvml.DeviceSetGpcClkVfOffset() +func DeviceSetGpcClkVfOffset(Device Device, Offset int) Return { + return nvmlDeviceSetGpcClkVfOffset(Device, int32(Offset)) +} + +func (Device Device) SetGpcClkVfOffset(Offset int) Return { + return DeviceSetGpcClkVfOffset(Device, Offset) +} + +// nvml.DeviceGetMinMaxClockOfPState() +func DeviceGetMinMaxClockOfPState(Device Device, _type ClockType, Pstate Pstates) (uint32, uint32, Return) { + var MinClockMHz, MaxClockMHz uint32 + ret := nvmlDeviceGetMinMaxClockOfPState(Device, _type, Pstate, &MinClockMHz, &MaxClockMHz) + return MinClockMHz, MaxClockMHz, ret +} + +func (Device Device) GetMinMaxClockOfPState(_type ClockType, Pstate Pstates) (uint32, uint32, Return) { + return DeviceGetMinMaxClockOfPState(Device, _type, Pstate) +} + +// nvml.DeviceGetSupportedPerformanceStates() +func DeviceGetSupportedPerformanceStates(Device Device) ([]Pstates, Return) { + Pstates := make([]Pstates, MAX_GPU_PERF_PSTATES) + 
ret := nvmlDeviceGetSupportedPerformanceStates(Device, &Pstates[0], MAX_GPU_PERF_PSTATES) + for i := 0; i < MAX_GPU_PERF_PSTATES; i++ { + if Pstates[i] == PSTATE_UNKNOWN { + return Pstates[0:i], ret + } + } + return Pstates, ret +} + +func (Device Device) GetSupportedPerformanceStates() ([]Pstates, Return) { + return DeviceGetSupportedPerformanceStates(Device) +} + +// nvml.DeviceGetTargetFanSpeed() +func DeviceGetTargetFanSpeed(Device Device, Fan int) (int, Return) { + var TargetSpeed uint32 + ret := nvmlDeviceGetTargetFanSpeed(Device, uint32(Fan), &TargetSpeed) + return int(TargetSpeed), ret +} + +func (Device Device) GetTargetFanSpeed(Fan int) (int, Return) { + return DeviceGetTargetFanSpeed(Device, Fan) +} + +// nvml.DeviceGetMemClkVfOffset() +func DeviceGetMemClkVfOffset(Device Device) (int, Return) { + var Offset int32 + ret := nvmlDeviceGetMemClkVfOffset(Device, &Offset) + return int(Offset), ret +} + +func (Device Device) GetMemClkVfOffset() (int, Return) { + return DeviceGetMemClkVfOffset(Device) +} + +// nvml.DeviceSetMemClkVfOffset() +func DeviceSetMemClkVfOffset(Device Device, Offset int) Return { + return nvmlDeviceSetMemClkVfOffset(Device, int32(Offset)) +} + +func (Device Device) SetMemClkVfOffset(Offset int) Return { + return DeviceSetMemClkVfOffset(Device, Offset) +} + +// nvml.DeviceGetGpcClkMinMaxVfOffset() +func DeviceGetGpcClkMinMaxVfOffset(Device Device) (int, int, Return) { + var MinOffset, MaxOffset int32 + ret := nvmlDeviceGetGpcClkMinMaxVfOffset(Device, &MinOffset, &MaxOffset) + return int(MinOffset), int(MaxOffset), ret +} + +func (Device Device) GetGpcClkMinMaxVfOffset() (int, int, Return) { + return DeviceGetGpcClkMinMaxVfOffset(Device) +} + +// nvml.DeviceGetMemClkMinMaxVfOffset() +func DeviceGetMemClkMinMaxVfOffset(Device Device) (int, int, Return) { + var MinOffset, MaxOffset int32 + ret := nvmlDeviceGetMemClkMinMaxVfOffset(Device, &MinOffset, &MaxOffset) + return int(MinOffset), int(MaxOffset), ret +} + +func (Device Device) GetMemClkMinMaxVfOffset() (int, int, Return) { + return DeviceGetMemClkMinMaxVfOffset(Device) +} + +// nvml.DeviceGetGpuMaxPcieLinkGeneration() +func DeviceGetGpuMaxPcieLinkGeneration(Device Device) (int, Return) { + var MaxLinkGenDevice uint32 + ret := nvmlDeviceGetGpuMaxPcieLinkGeneration(Device, &MaxLinkGenDevice) + return int(MaxLinkGenDevice), ret +} + +func (Device Device) GetGpuMaxPcieLinkGeneration() (int, Return) { + return DeviceGetGpuMaxPcieLinkGeneration(Device) +} + +// nvml.DeviceGetFanControlPolicy_v2() +func DeviceGetFanControlPolicy_v2(Device Device, Fan int) (FanControlPolicy, Return) { + var Policy FanControlPolicy + ret := nvmlDeviceGetFanControlPolicy_v2(Device, uint32(Fan), &Policy) + return Policy, ret +} + +func (Device Device) GetFanControlPolicy_v2(Fan int) (FanControlPolicy, Return) { + return DeviceGetFanControlPolicy_v2(Device, Fan) +} + +// nvml.DeviceSetFanControlPolicy() +func DeviceSetFanControlPolicy(Device Device, Fan int, Policy FanControlPolicy) Return { + return nvmlDeviceSetFanControlPolicy(Device, uint32(Fan), Policy) +} + +func (Device Device) SetFanControlPolicy(Fan int, Policy FanControlPolicy) Return { + return DeviceSetFanControlPolicy(Device, Fan, Policy) +} + +// nvml.DeviceClearFieldValues() +func DeviceClearFieldValues(Device Device, Values []FieldValue) Return { + ValuesCount := len(Values) + return nvmlDeviceClearFieldValues(Device, int32(ValuesCount), &Values[0]) +} + +func (Device Device) ClearFieldValues(Values []FieldValue) Return { + return DeviceClearFieldValues(Device, Values) +} 
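The device wrappers vendored above all follow the same shape: a package-level DeviceGetXxx(Device, ...) function plus a method of the same name on Device, each returning a Return status code rather than a Go error. A minimal usage sketch follows (not part of this diff; it assumes the vendored import path github.com/NVIDIA/go-nvml/pkg/nvml and the package's usual Init, Shutdown, and DeviceGetHandleByIndex helpers, which sit outside this excerpt):

package main

import (
	"fmt"
	"log"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
)

func main() {
	// Load libnvidia-ml.so.1 and initialize NVML; failures come back as a Return code, not an error.
	if ret := nvml.Init(); ret != nvml.SUCCESS {
		log.Fatalf("nvml.Init failed: %v", ret)
	}
	defer nvml.Shutdown()

	// Grab the first GPU and exercise a couple of the wrappers added in this hunk.
	device, ret := nvml.DeviceGetHandleByIndex(0)
	if ret != nvml.SUCCESS {
		log.Fatalf("DeviceGetHandleByIndex failed: %v", ret)
	}

	// GetMinMaxFanSpeed returns (min, max, Return), mirroring DeviceGetMinMaxFanSpeed above.
	if minSpeed, maxSpeed, ret := device.GetMinMaxFanSpeed(); ret == nvml.SUCCESS {
		fmt.Printf("fan speed range: %d%%-%d%%\n", minSpeed, maxSpeed)
	}

	// GetSupportedPerformanceStates trims the fixed-size buffer at the first PSTATE_UNKNOWN entry.
	if pstates, ret := device.GetSupportedPerformanceStates(); ret == nvml.SUCCESS {
		fmt.Printf("supported P-states: %v\n", pstates)
	}
}

Callers are expected to compare each Return value against nvml.SUCCESS themselves; none of these generated wrappers convert status codes into Go errors.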
+ +// nvml.DeviceGetVgpuCapabilities() +func DeviceGetVgpuCapabilities(Device Device, Capability DeviceVgpuCapability) (bool, Return) { + var CapResult uint32 + ret := nvmlDeviceGetVgpuCapabilities(Device, Capability, &CapResult) + return (CapResult != 0), ret +} + +func (Device Device) GetVgpuCapabilities(Capability DeviceVgpuCapability) (bool, Return) { + return DeviceGetVgpuCapabilities(Device, Capability) +} + +// nvml.DeviceGetVgpuSchedulerLog() +func DeviceGetVgpuSchedulerLog(Device Device) (VgpuSchedulerLog, Return) { + var PSchedulerLog VgpuSchedulerLog + ret := nvmlDeviceGetVgpuSchedulerLog(Device, &PSchedulerLog) + return PSchedulerLog, ret +} + +func (Device Device) GetVgpuSchedulerLog() (VgpuSchedulerLog, Return) { + return DeviceGetVgpuSchedulerLog(Device) +} + +// nvml.DeviceGetVgpuSchedulerState() +func DeviceGetVgpuSchedulerState(Device Device) (VgpuSchedulerGetState, Return) { + var PSchedulerState VgpuSchedulerGetState + ret := nvmlDeviceGetVgpuSchedulerState(Device, &PSchedulerState) + return PSchedulerState, ret +} + +func (Device Device) GetVgpuSchedulerState() (VgpuSchedulerGetState, Return) { + return DeviceGetVgpuSchedulerState(Device) +} + +// nvml.DeviceSetVgpuSchedulerState() +func DeviceSetVgpuSchedulerState(Device Device, PSchedulerState *VgpuSchedulerSetState) Return { + return nvmlDeviceSetVgpuSchedulerState(Device, PSchedulerState) +} + +func (Device Device) SetVgpuSchedulerState(PSchedulerState *VgpuSchedulerSetState) Return { + return DeviceSetVgpuSchedulerState(Device, PSchedulerState) +} + +// nvml.DeviceGetVgpuSchedulerCapabilities() +func DeviceGetVgpuSchedulerCapabilities(Device Device) (VgpuSchedulerCapabilities, Return) { + var PCapabilities VgpuSchedulerCapabilities + ret := nvmlDeviceGetVgpuSchedulerCapabilities(Device, &PCapabilities) + return PCapabilities, ret +} + +func (Device Device) GetVgpuSchedulerCapabilities() (VgpuSchedulerCapabilities, Return) { + return DeviceGetVgpuSchedulerCapabilities(Device) +} + +// nvml.GpuInstanceGetComputeInstancePossiblePlacements() +func GpuInstanceGetComputeInstancePossiblePlacements(GpuInstance GpuInstance, Info *ComputeInstanceProfileInfo) ([]ComputeInstancePlacement, Return) { + var Count uint32 + ret := nvmlGpuInstanceGetComputeInstancePossiblePlacements(GpuInstance, Info.Id, nil, &Count) + if ret != SUCCESS { + return nil, ret + } + if Count == 0 { + return []ComputeInstancePlacement{}, ret + } + PlacementArray := make([]ComputeInstancePlacement, Count) + ret = nvmlGpuInstanceGetComputeInstancePossiblePlacements(GpuInstance, Info.Id, &PlacementArray[0], &Count) + return PlacementArray, ret +} + +func (GpuInstance GpuInstance) GetComputeInstancePossiblePlacements(Info *ComputeInstanceProfileInfo) ([]ComputeInstancePlacement, Return) { + return GpuInstanceGetComputeInstancePossiblePlacements(GpuInstance, Info) +} + +// nvml.GpuInstanceCreateComputeInstanceWithPlacement() +func GpuInstanceCreateComputeInstanceWithPlacement(GpuInstance GpuInstance, Info *ComputeInstanceProfileInfo, Placement *ComputeInstancePlacement, ComputeInstance *ComputeInstance) Return { + return nvmlGpuInstanceCreateComputeInstanceWithPlacement(GpuInstance, Info.Id, Placement, ComputeInstance) +} + +func (GpuInstance GpuInstance) CreateComputeInstanceWithPlacement(Info *ComputeInstanceProfileInfo, Placement *ComputeInstancePlacement, ComputeInstance *ComputeInstance) Return { + return GpuInstanceCreateComputeInstanceWithPlacement(GpuInstance, Info, Placement, ComputeInstance) +} + +// nvml.DeviceGetGpuFabricInfo() +func 
DeviceGetGpuFabricInfo(Device Device) (GpuFabricInfo, Return) { + var GpuFabricInfo GpuFabricInfo + ret := nvmlDeviceGetGpuFabricInfo(Device, &GpuFabricInfo) + return GpuFabricInfo, ret +} + +func (Device Device) GetGpuFabricInfo() (GpuFabricInfo, Return) { + return DeviceGetGpuFabricInfo(Device) +} + +// nvml.DeviceCcuGetStreamState() +func DeviceCcuGetStreamState(Device Device) (int, Return) { + var State uint32 + ret := nvmlDeviceCcuGetStreamState(Device, &State) + return int(State), ret +} + +func (Device Device) CcuGetStreamState() (int, Return) { + return DeviceCcuGetStreamState(Device) +} + +// nvml.DeviceCcuSetStreamState() +func DeviceCcuSetStreamState(Device Device, State int) Return { + return nvmlDeviceCcuSetStreamState(Device, uint32(State)) +} + +func (Device Device) CcuSetStreamState(State int) Return { + return DeviceCcuSetStreamState(Device, State) +} + +// nvml.DeviceSetNvLinkDeviceLowPowerThreshold() +func DeviceSetNvLinkDeviceLowPowerThreshold(Device Device, Info *NvLinkPowerThres) Return { + return nvmlDeviceSetNvLinkDeviceLowPowerThreshold(Device, Info) +} + +func (Device Device) SetNvLinkDeviceLowPowerThreshold(Info *NvLinkPowerThres) Return { + return DeviceSetNvLinkDeviceLowPowerThreshold(Device, Info) +} diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/doc.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/doc.go new file mode 100644 index 0000000..c2ce2e3 --- /dev/null +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/doc.go @@ -0,0 +1,21 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// WARNING: THIS FILE WAS AUTOMATICALLY GENERATED. +// Code generated by https://git.io/c-for-go. DO NOT EDIT. + +/* +Package NVML bindings +*/ +package nvml diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/event_set.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/event_set.go new file mode 100644 index 0000000..c6315f5 --- /dev/null +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/event_set.go @@ -0,0 +1,42 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package nvml + +// nvml.EventSetCreate() +func EventSetCreate() (EventSet, Return) { + var Set EventSet + ret := nvmlEventSetCreate(&Set) + return Set, ret +} + +// nvml.EventSetWait() +func EventSetWait(Set EventSet, Timeoutms uint32) (EventData, Return) { + var Data EventData + ret := nvmlEventSetWait(Set, &Data, Timeoutms) + return Data, ret +} + +func (Set EventSet) Wait(Timeoutms uint32) (EventData, Return) { + return EventSetWait(Set, Timeoutms) +} + +// nvml.EventSetFree() +func EventSetFree(Set EventSet) Return { + return nvmlEventSetFree(Set) +} + +func (Set EventSet) Free() Return { + return EventSetFree(Set) +} diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/init.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/init.go new file mode 100644 index 0000000..1572f81 --- /dev/null +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/init.go @@ -0,0 +1,218 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nvml + +import ( + "fmt" + + "github.com/NVIDIA/go-nvml/pkg/dl" +) + +import "C" + +const ( + nvmlLibraryName = "libnvidia-ml.so.1" + nvmlLibraryLoadFlags = dl.RTLD_LAZY | dl.RTLD_GLOBAL +) + +var nvml *dl.DynamicLibrary + +// nvml.Init() +func Init() Return { + lib := dl.New(nvmlLibraryName, nvmlLibraryLoadFlags) + err := lib.Open() + if err != nil { + return ERROR_LIBRARY_NOT_FOUND + } + + nvml = lib + updateVersionedSymbols() + + return nvmlInit() +} + +// nvml.InitWithFlags() +func InitWithFlags(Flags uint32) Return { + lib := dl.New(nvmlLibraryName, nvmlLibraryLoadFlags) + err := lib.Open() + if err != nil { + return ERROR_LIBRARY_NOT_FOUND + } + + nvml = lib + + return nvmlInitWithFlags(Flags) +} + +// nvml.Shutdown() +func Shutdown() Return { + ret := nvmlShutdown() + if ret != SUCCESS { + return ret + } + + err := nvml.Close() + if err != nil { + panic(fmt.Sprintf("error closing %s: %v", nvmlLibraryName, err)) + } + + return ret +} + +// Default all versioned APIs to v1 (to infer the types) +var nvmlInit = nvmlInit_v1 +var nvmlDeviceGetPciInfo = nvmlDeviceGetPciInfo_v1 +var nvmlDeviceGetCount = nvmlDeviceGetCount_v1 +var nvmlDeviceGetHandleByIndex = nvmlDeviceGetHandleByIndex_v1 +var nvmlDeviceGetHandleByPciBusId = nvmlDeviceGetHandleByPciBusId_v1 +var nvmlDeviceGetNvLinkRemotePciInfo = nvmlDeviceGetNvLinkRemotePciInfo_v1 +var nvmlDeviceRemoveGpu = nvmlDeviceRemoveGpu_v1 +var nvmlDeviceGetGridLicensableFeatures = nvmlDeviceGetGridLicensableFeatures_v1 +var nvmlEventSetWait = nvmlEventSetWait_v1 +var nvmlDeviceGetAttributes = nvmlDeviceGetAttributes_v1 +var nvmlComputeInstanceGetInfo = nvmlComputeInstanceGetInfo_v1 +var DeviceGetComputeRunningProcesses = deviceGetComputeRunningProcesses_v1 +var DeviceGetGraphicsRunningProcesses = deviceGetGraphicsRunningProcesses_v1 +var DeviceGetMPSComputeRunningProcesses = deviceGetMPSComputeRunningProcesses_v1 +var GetBlacklistDeviceCount = GetExcludedDeviceCount +var GetBlacklistDeviceInfoByIndex = GetExcludedDeviceInfoByIndex +var 
nvmlDeviceGetGpuInstancePossiblePlacements = nvmlDeviceGetGpuInstancePossiblePlacements_v1 +var nvmlVgpuInstanceGetLicenseInfo = nvmlVgpuInstanceGetLicenseInfo_v1 + +type BlacklistDeviceInfo = ExcludedDeviceInfo +type ProcessInfo_v1Slice []ProcessInfo_v1 +type ProcessInfo_v2Slice []ProcessInfo_v2 + +func (pis ProcessInfo_v1Slice) ToProcessInfoSlice() []ProcessInfo { + var newInfos []ProcessInfo + for _, pi := range pis { + info := ProcessInfo{ + Pid: pi.Pid, + UsedGpuMemory: pi.UsedGpuMemory, + GpuInstanceId: 0xFFFFFFFF, // GPU instance ID is invalid in v1 + ComputeInstanceId: 0xFFFFFFFF, // Compute instance ID is invalid in v1 + } + newInfos = append(newInfos, info) + } + return newInfos +} + +func (pis ProcessInfo_v2Slice) ToProcessInfoSlice() []ProcessInfo { + var newInfos []ProcessInfo + for _, pi := range pis { + info := ProcessInfo{ + Pid: pi.Pid, + UsedGpuMemory: pi.UsedGpuMemory, + GpuInstanceId: pi.GpuInstanceId, + ComputeInstanceId: pi.ComputeInstanceId, + } + newInfos = append(newInfos, info) + } + return newInfos +} + +// updateVersionedSymbols() +func updateVersionedSymbols() { + err := nvml.Lookup("nvmlInit_v2") + if err == nil { + nvmlInit = nvmlInit_v2 + } + err = nvml.Lookup("nvmlDeviceGetPciInfo_v2") + if err == nil { + nvmlDeviceGetPciInfo = nvmlDeviceGetPciInfo_v2 + } + err = nvml.Lookup("nvmlDeviceGetPciInfo_v3") + if err == nil { + nvmlDeviceGetPciInfo = nvmlDeviceGetPciInfo_v3 + } + err = nvml.Lookup("nvmlDeviceGetCount_v2") + if err == nil { + nvmlDeviceGetCount = nvmlDeviceGetCount_v2 + } + err = nvml.Lookup("nvmlDeviceGetHandleByIndex_v2") + if err == nil { + nvmlDeviceGetHandleByIndex = nvmlDeviceGetHandleByIndex_v2 + } + err = nvml.Lookup("nvmlDeviceGetHandleByPciBusId_v2") + if err == nil { + nvmlDeviceGetHandleByPciBusId = nvmlDeviceGetHandleByPciBusId_v2 + } + err = nvml.Lookup("nvmlDeviceGetNvLinkRemotePciInfo_v2") + if err == nil { + nvmlDeviceGetNvLinkRemotePciInfo = nvmlDeviceGetNvLinkRemotePciInfo_v2 + } + // Unable to overwrite nvmlDeviceRemoveGpu() because the v2 function takes + // a different set of parameters than the v1 function. 
+ //err = nvml.Lookup("nvmlDeviceRemoveGpu_v2") + //if err == nil { + // nvmlDeviceRemoveGpu = nvmlDeviceRemoveGpu_v2 + //} + err = nvml.Lookup("nvmlDeviceGetGridLicensableFeatures_v2") + if err == nil { + nvmlDeviceGetGridLicensableFeatures = nvmlDeviceGetGridLicensableFeatures_v2 + } + err = nvml.Lookup("nvmlDeviceGetGridLicensableFeatures_v3") + if err == nil { + nvmlDeviceGetGridLicensableFeatures = nvmlDeviceGetGridLicensableFeatures_v3 + } + err = nvml.Lookup("nvmlDeviceGetGridLicensableFeatures_v4") + if err == nil { + nvmlDeviceGetGridLicensableFeatures = nvmlDeviceGetGridLicensableFeatures_v4 + } + err = nvml.Lookup("nvmlEventSetWait_v2") + if err == nil { + nvmlEventSetWait = nvmlEventSetWait_v2 + } + err = nvml.Lookup("nvmlDeviceGetAttributes_v2") + if err == nil { + nvmlDeviceGetAttributes = nvmlDeviceGetAttributes_v2 + } + err = nvml.Lookup("nvmlComputeInstanceGetInfo_v2") + if err == nil { + nvmlComputeInstanceGetInfo = nvmlComputeInstanceGetInfo_v2 + } + err = nvml.Lookup("nvmlDeviceGetComputeRunningProcesses_v2") + if err == nil { + DeviceGetComputeRunningProcesses = deviceGetComputeRunningProcesses_v2 + } + err = nvml.Lookup("nvmlDeviceGetComputeRunningProcesses_v3") + if err == nil { + DeviceGetComputeRunningProcesses = deviceGetComputeRunningProcesses_v3 + } + err = nvml.Lookup("nvmlDeviceGetGraphicsRunningProcesses_v2") + if err == nil { + DeviceGetGraphicsRunningProcesses = deviceGetGraphicsRunningProcesses_v2 + } + err = nvml.Lookup("nvmlDeviceGetGraphicsRunningProcesses_v3") + if err == nil { + DeviceGetGraphicsRunningProcesses = deviceGetGraphicsRunningProcesses_v3 + } + err = nvml.Lookup("nvmlDeviceGetMPSComputeRunningProcesses_v2") + if err == nil { + DeviceGetMPSComputeRunningProcesses = deviceGetMPSComputeRunningProcesses_v2 + } + err = nvml.Lookup("nvmlDeviceGetMPSComputeRunningProcesses_v3") + if err == nil { + DeviceGetMPSComputeRunningProcesses = deviceGetMPSComputeRunningProcesses_v3 + } + err = nvml.Lookup("nvmlDeviceGetGpuInstancePossiblePlacements_v2") + if err == nil { + nvmlDeviceGetGpuInstancePossiblePlacements = nvmlDeviceGetGpuInstancePossiblePlacements_v2 + } + err = nvml.Lookup("nvmlVgpuInstanceGetLicenseInfo_v2") + if err == nil { + nvmlVgpuInstanceGetLicenseInfo = nvmlVgpuInstanceGetLicenseInfo_v2 + } +} diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.go new file mode 100644 index 0000000..f63dfe8 --- /dev/null +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.go @@ -0,0 +1,2970 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// WARNING: THIS FILE WAS AUTOMATICALLY GENERATED. +// Code generated by https://git.io/c-for-go. DO NOT EDIT. 
+ +package nvml + +/* +#cgo LDFLAGS: -Wl,--unresolved-symbols=ignore-in-object-files +#cgo CFLAGS: -DNVML_NO_UNVERSIONED_FUNC_DEFS=1 +#include "nvml.h" +#include +#include "cgo_helpers.h" +*/ +import "C" +import "unsafe" + +// nvmlInit_v2 function as declared in nvml/nvml.h +func nvmlInit_v2() Return { + __ret := C.nvmlInit_v2() + __v := (Return)(__ret) + return __v +} + +// nvmlInitWithFlags function as declared in nvml/nvml.h +func nvmlInitWithFlags(Flags uint32) Return { + cFlags, _ := (C.uint)(Flags), cgoAllocsUnknown + __ret := C.nvmlInitWithFlags(cFlags) + __v := (Return)(__ret) + return __v +} + +// nvmlShutdown function as declared in nvml/nvml.h +func nvmlShutdown() Return { + __ret := C.nvmlShutdown() + __v := (Return)(__ret) + return __v +} + +// nvmlErrorString function as declared in nvml/nvml.h +func nvmlErrorString(Result Return) string { + cResult, _ := (C.nvmlReturn_t)(Result), cgoAllocsUnknown + __ret := C.nvmlErrorString(cResult) + __v := packPCharString(__ret) + return __v +} + +// nvmlSystemGetDriverVersion function as declared in nvml/nvml.h +func nvmlSystemGetDriverVersion(Version *byte, Length uint32) Return { + cVersion, _ := (*C.char)(unsafe.Pointer(Version)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlSystemGetDriverVersion(cVersion, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetNVMLVersion function as declared in nvml/nvml.h +func nvmlSystemGetNVMLVersion(Version *byte, Length uint32) Return { + cVersion, _ := (*C.char)(unsafe.Pointer(Version)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlSystemGetNVMLVersion(cVersion, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetCudaDriverVersion function as declared in nvml/nvml.h +func nvmlSystemGetCudaDriverVersion(CudaDriverVersion *int32) Return { + cCudaDriverVersion, _ := (*C.int)(unsafe.Pointer(CudaDriverVersion)), cgoAllocsUnknown + __ret := C.nvmlSystemGetCudaDriverVersion(cCudaDriverVersion) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetCudaDriverVersion_v2 function as declared in nvml/nvml.h +func nvmlSystemGetCudaDriverVersion_v2(CudaDriverVersion *int32) Return { + cCudaDriverVersion, _ := (*C.int)(unsafe.Pointer(CudaDriverVersion)), cgoAllocsUnknown + __ret := C.nvmlSystemGetCudaDriverVersion_v2(cCudaDriverVersion) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetProcessName function as declared in nvml/nvml.h +func nvmlSystemGetProcessName(Pid uint32, Name *byte, Length uint32) Return { + cPid, _ := (C.uint)(Pid), cgoAllocsUnknown + cName, _ := (*C.char)(unsafe.Pointer(Name)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlSystemGetProcessName(cPid, cName, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitGetCount function as declared in nvml/nvml.h +func nvmlUnitGetCount(UnitCount *uint32) Return { + cUnitCount, _ := (*C.uint)(unsafe.Pointer(UnitCount)), cgoAllocsUnknown + __ret := C.nvmlUnitGetCount(cUnitCount) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitGetHandleByIndex function as declared in nvml/nvml.h +func nvmlUnitGetHandleByIndex(Index uint32, Unit *Unit) Return { + cIndex, _ := (C.uint)(Index), cgoAllocsUnknown + cUnit, _ := (*C.nvmlUnit_t)(unsafe.Pointer(Unit)), cgoAllocsUnknown + __ret := C.nvmlUnitGetHandleByIndex(cIndex, cUnit) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitGetUnitInfo function as declared in nvml/nvml.h +func nvmlUnitGetUnitInfo(Unit Unit, Info *UnitInfo) 
Return { + cUnit, _ := *(*C.nvmlUnit_t)(unsafe.Pointer(&Unit)), cgoAllocsUnknown + cInfo, _ := (*C.nvmlUnitInfo_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlUnitGetUnitInfo(cUnit, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitGetLedState function as declared in nvml/nvml.h +func nvmlUnitGetLedState(Unit Unit, State *LedState) Return { + cUnit, _ := *(*C.nvmlUnit_t)(unsafe.Pointer(&Unit)), cgoAllocsUnknown + cState, _ := (*C.nvmlLedState_t)(unsafe.Pointer(State)), cgoAllocsUnknown + __ret := C.nvmlUnitGetLedState(cUnit, cState) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitGetPsuInfo function as declared in nvml/nvml.h +func nvmlUnitGetPsuInfo(Unit Unit, Psu *PSUInfo) Return { + cUnit, _ := *(*C.nvmlUnit_t)(unsafe.Pointer(&Unit)), cgoAllocsUnknown + cPsu, _ := (*C.nvmlPSUInfo_t)(unsafe.Pointer(Psu)), cgoAllocsUnknown + __ret := C.nvmlUnitGetPsuInfo(cUnit, cPsu) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitGetTemperature function as declared in nvml/nvml.h +func nvmlUnitGetTemperature(Unit Unit, _type uint32, Temp *uint32) Return { + cUnit, _ := *(*C.nvmlUnit_t)(unsafe.Pointer(&Unit)), cgoAllocsUnknown + c_type, _ := (C.uint)(_type), cgoAllocsUnknown + cTemp, _ := (*C.uint)(unsafe.Pointer(Temp)), cgoAllocsUnknown + __ret := C.nvmlUnitGetTemperature(cUnit, c_type, cTemp) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitGetFanSpeedInfo function as declared in nvml/nvml.h +func nvmlUnitGetFanSpeedInfo(Unit Unit, FanSpeeds *UnitFanSpeeds) Return { + cUnit, _ := *(*C.nvmlUnit_t)(unsafe.Pointer(&Unit)), cgoAllocsUnknown + cFanSpeeds, _ := (*C.nvmlUnitFanSpeeds_t)(unsafe.Pointer(FanSpeeds)), cgoAllocsUnknown + __ret := C.nvmlUnitGetFanSpeedInfo(cUnit, cFanSpeeds) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitGetDevices function as declared in nvml/nvml.h +func nvmlUnitGetDevices(Unit Unit, DeviceCount *uint32, Devices *Device) Return { + cUnit, _ := *(*C.nvmlUnit_t)(unsafe.Pointer(&Unit)), cgoAllocsUnknown + cDeviceCount, _ := (*C.uint)(unsafe.Pointer(DeviceCount)), cgoAllocsUnknown + cDevices, _ := (*C.nvmlDevice_t)(unsafe.Pointer(Devices)), cgoAllocsUnknown + __ret := C.nvmlUnitGetDevices(cUnit, cDeviceCount, cDevices) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetHicVersion function as declared in nvml/nvml.h +func nvmlSystemGetHicVersion(HwbcCount *uint32, HwbcEntries *HwbcEntry) Return { + cHwbcCount, _ := (*C.uint)(unsafe.Pointer(HwbcCount)), cgoAllocsUnknown + cHwbcEntries, _ := (*C.nvmlHwbcEntry_t)(unsafe.Pointer(HwbcEntries)), cgoAllocsUnknown + __ret := C.nvmlSystemGetHicVersion(cHwbcCount, cHwbcEntries) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCount_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetCount_v2(DeviceCount *uint32) Return { + cDeviceCount, _ := (*C.uint)(unsafe.Pointer(DeviceCount)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCount_v2(cDeviceCount) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAttributes_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetAttributes_v2(Device Device, Attributes *DeviceAttributes) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cAttributes, _ := (*C.nvmlDeviceAttributes_t)(unsafe.Pointer(Attributes)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAttributes_v2(cDevice, cAttributes) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetHandleByIndex_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetHandleByIndex_v2(Index uint32, Device *Device) Return { + cIndex, _ := 
(C.uint)(Index), cgoAllocsUnknown + cDevice, _ := (*C.nvmlDevice_t)(unsafe.Pointer(Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetHandleByIndex_v2(cIndex, cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetHandleBySerial function as declared in nvml/nvml.h +func nvmlDeviceGetHandleBySerial(Serial string, Device *Device) Return { + cSerial, _ := unpackPCharString(Serial) + cDevice, _ := (*C.nvmlDevice_t)(unsafe.Pointer(Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetHandleBySerial(cSerial, cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetHandleByUUID function as declared in nvml/nvml.h +func nvmlDeviceGetHandleByUUID(Uuid string, Device *Device) Return { + cUuid, _ := unpackPCharString(Uuid) + cDevice, _ := (*C.nvmlDevice_t)(unsafe.Pointer(Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetHandleByUUID(cUuid, cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetHandleByPciBusId_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetHandleByPciBusId_v2(PciBusId string, Device *Device) Return { + cPciBusId, _ := unpackPCharString(PciBusId) + cDevice, _ := (*C.nvmlDevice_t)(unsafe.Pointer(Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetHandleByPciBusId_v2(cPciBusId, cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetName function as declared in nvml/nvml.h +func nvmlDeviceGetName(Device Device, Name *byte, Length uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cName, _ := (*C.char)(unsafe.Pointer(Name)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlDeviceGetName(cDevice, cName, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetBrand function as declared in nvml/nvml.h +func nvmlDeviceGetBrand(Device Device, _type *BrandType) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + c_type, _ := (*C.nvmlBrandType_t)(unsafe.Pointer(_type)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetBrand(cDevice, c_type) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetIndex function as declared in nvml/nvml.h +func nvmlDeviceGetIndex(Device Device, Index *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cIndex, _ := (*C.uint)(unsafe.Pointer(Index)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetIndex(cDevice, cIndex) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSerial function as declared in nvml/nvml.h +func nvmlDeviceGetSerial(Device Device, Serial *byte, Length uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cSerial, _ := (*C.char)(unsafe.Pointer(Serial)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSerial(cDevice, cSerial, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMemoryAffinity function as declared in nvml/nvml.h +func nvmlDeviceGetMemoryAffinity(Device Device, NodeSetSize uint32, NodeSet *uint, Scope AffinityScope) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cNodeSetSize, _ := (C.uint)(NodeSetSize), cgoAllocsUnknown + cNodeSet, _ := (*C.ulong)(unsafe.Pointer(NodeSet)), cgoAllocsUnknown + cScope, _ := (C.nvmlAffinityScope_t)(Scope), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMemoryAffinity(cDevice, cNodeSetSize, cNodeSet, cScope) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCpuAffinityWithinScope function as declared in 
nvml/nvml.h +func nvmlDeviceGetCpuAffinityWithinScope(Device Device, CpuSetSize uint32, CpuSet *uint, Scope AffinityScope) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCpuSetSize, _ := (C.uint)(CpuSetSize), cgoAllocsUnknown + cCpuSet, _ := (*C.ulong)(unsafe.Pointer(CpuSet)), cgoAllocsUnknown + cScope, _ := (C.nvmlAffinityScope_t)(Scope), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCpuAffinityWithinScope(cDevice, cCpuSetSize, cCpuSet, cScope) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCpuAffinity function as declared in nvml/nvml.h +func nvmlDeviceGetCpuAffinity(Device Device, CpuSetSize uint32, CpuSet *uint) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCpuSetSize, _ := (C.uint)(CpuSetSize), cgoAllocsUnknown + cCpuSet, _ := (*C.ulong)(unsafe.Pointer(CpuSet)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCpuAffinity(cDevice, cCpuSetSize, cCpuSet) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetCpuAffinity function as declared in nvml/nvml.h +func nvmlDeviceSetCpuAffinity(Device Device) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceSetCpuAffinity(cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceClearCpuAffinity function as declared in nvml/nvml.h +func nvmlDeviceClearCpuAffinity(Device Device) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceClearCpuAffinity(cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetTopologyCommonAncestor function as declared in nvml/nvml.h +func nvmlDeviceGetTopologyCommonAncestor(Device1 Device, Device2 Device, PathInfo *GpuTopologyLevel) Return { + cDevice1, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device1)), cgoAllocsUnknown + cDevice2, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device2)), cgoAllocsUnknown + cPathInfo, _ := (*C.nvmlGpuTopologyLevel_t)(unsafe.Pointer(PathInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetTopologyCommonAncestor(cDevice1, cDevice2, cPathInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetTopologyNearestGpus function as declared in nvml/nvml.h +func nvmlDeviceGetTopologyNearestGpus(Device Device, Level GpuTopologyLevel, Count *uint32, DeviceArray *Device) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLevel, _ := (C.nvmlGpuTopologyLevel_t)(Level), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + cDeviceArray, _ := (*C.nvmlDevice_t)(unsafe.Pointer(DeviceArray)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetTopologyNearestGpus(cDevice, cLevel, cCount, cDeviceArray) + __v := (Return)(__ret) + return __v +} + +// nvmlSystemGetTopologyGpuSet function as declared in nvml/nvml.h +func nvmlSystemGetTopologyGpuSet(CpuNumber uint32, Count *uint32, DeviceArray *Device) Return { + cCpuNumber, _ := (C.uint)(CpuNumber), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + cDeviceArray, _ := (*C.nvmlDevice_t)(unsafe.Pointer(DeviceArray)), cgoAllocsUnknown + __ret := C.nvmlSystemGetTopologyGpuSet(cCpuNumber, cCount, cDeviceArray) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetP2PStatus function as declared in nvml/nvml.h +func nvmlDeviceGetP2PStatus(Device1 Device, Device2 Device, P2pIndex GpuP2PCapsIndex, P2pStatus *GpuP2PStatus) Return { + cDevice1, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device1)), cgoAllocsUnknown + cDevice2, _ := 
*(*C.nvmlDevice_t)(unsafe.Pointer(&Device2)), cgoAllocsUnknown + cP2pIndex, _ := (C.nvmlGpuP2PCapsIndex_t)(P2pIndex), cgoAllocsUnknown + cP2pStatus, _ := (*C.nvmlGpuP2PStatus_t)(unsafe.Pointer(P2pStatus)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetP2PStatus(cDevice1, cDevice2, cP2pIndex, cP2pStatus) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetUUID function as declared in nvml/nvml.h +func nvmlDeviceGetUUID(Device Device, Uuid *byte, Length uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cUuid, _ := (*C.char)(unsafe.Pointer(Uuid)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlDeviceGetUUID(cDevice, cUuid, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetMdevUUID function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetMdevUUID(VgpuInstance VgpuInstance, MdevUuid *byte, Size uint32) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cMdevUuid, _ := (*C.char)(unsafe.Pointer(MdevUuid)), cgoAllocsUnknown + cSize, _ := (C.uint)(Size), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetMdevUUID(cVgpuInstance, cMdevUuid, cSize) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMinorNumber function as declared in nvml/nvml.h +func nvmlDeviceGetMinorNumber(Device Device, MinorNumber *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMinorNumber, _ := (*C.uint)(unsafe.Pointer(MinorNumber)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMinorNumber(cDevice, cMinorNumber) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetBoardPartNumber function as declared in nvml/nvml.h +func nvmlDeviceGetBoardPartNumber(Device Device, PartNumber *byte, Length uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPartNumber, _ := (*C.char)(unsafe.Pointer(PartNumber)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlDeviceGetBoardPartNumber(cDevice, cPartNumber, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetInforomVersion function as declared in nvml/nvml.h +func nvmlDeviceGetInforomVersion(Device Device, Object InforomObject, Version *byte, Length uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cObject, _ := (C.nvmlInforomObject_t)(Object), cgoAllocsUnknown + cVersion, _ := (*C.char)(unsafe.Pointer(Version)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlDeviceGetInforomVersion(cDevice, cObject, cVersion, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetInforomImageVersion function as declared in nvml/nvml.h +func nvmlDeviceGetInforomImageVersion(Device Device, Version *byte, Length uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cVersion, _ := (*C.char)(unsafe.Pointer(Version)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlDeviceGetInforomImageVersion(cDevice, cVersion, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetInforomConfigurationChecksum function as declared in nvml/nvml.h +func nvmlDeviceGetInforomConfigurationChecksum(Device Device, Checksum *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cChecksum, _ := (*C.uint)(unsafe.Pointer(Checksum)), cgoAllocsUnknown + __ret := 
C.nvmlDeviceGetInforomConfigurationChecksum(cDevice, cChecksum) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceValidateInforom function as declared in nvml/nvml.h +func nvmlDeviceValidateInforom(Device Device) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceValidateInforom(cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetDisplayMode function as declared in nvml/nvml.h +func nvmlDeviceGetDisplayMode(Device Device, Display *EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cDisplay, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(Display)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDisplayMode(cDevice, cDisplay) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetDisplayActive function as declared in nvml/nvml.h +func nvmlDeviceGetDisplayActive(Device Device, IsActive *EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cIsActive, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(IsActive)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDisplayActive(cDevice, cIsActive) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPersistenceMode function as declared in nvml/nvml.h +func nvmlDeviceGetPersistenceMode(Device Device, Mode *EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMode, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(Mode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPersistenceMode(cDevice, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPciInfo_v3 function as declared in nvml/nvml.h +func nvmlDeviceGetPciInfo_v3(Device Device, Pci *PciInfo) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPci, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(Pci)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPciInfo_v3(cDevice, cPci) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMaxPcieLinkGeneration function as declared in nvml/nvml.h +func nvmlDeviceGetMaxPcieLinkGeneration(Device Device, MaxLinkGen *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMaxLinkGen, _ := (*C.uint)(unsafe.Pointer(MaxLinkGen)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMaxPcieLinkGeneration(cDevice, cMaxLinkGen) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuMaxPcieLinkGeneration function as declared in nvml/nvml.h +func nvmlDeviceGetGpuMaxPcieLinkGeneration(Device Device, MaxLinkGenDevice *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMaxLinkGenDevice, _ := (*C.uint)(unsafe.Pointer(MaxLinkGenDevice)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuMaxPcieLinkGeneration(cDevice, cMaxLinkGenDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMaxPcieLinkWidth function as declared in nvml/nvml.h +func nvmlDeviceGetMaxPcieLinkWidth(Device Device, MaxLinkWidth *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMaxLinkWidth, _ := (*C.uint)(unsafe.Pointer(MaxLinkWidth)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMaxPcieLinkWidth(cDevice, cMaxLinkWidth) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCurrPcieLinkGeneration function as declared in nvml/nvml.h +func nvmlDeviceGetCurrPcieLinkGeneration(Device Device, CurrLinkGen *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + 
+	cCurrLinkGen, _ := (*C.uint)(unsafe.Pointer(CurrLinkGen)), cgoAllocsUnknown
+	__ret := C.nvmlDeviceGetCurrPcieLinkGeneration(cDevice, cCurrLinkGen)
+	__v := (Return)(__ret)
+	return __v
+}
+
+// nvmlDeviceGetCurrPcieLinkWidth function as declared in nvml/nvml.h
+func nvmlDeviceGetCurrPcieLinkWidth(Device Device, CurrLinkWidth *uint32) Return {
+	cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown
+	cCurrLinkWidth, _ := (*C.uint)(unsafe.Pointer(CurrLinkWidth)), cgoAllocsUnknown
+	__ret := C.nvmlDeviceGetCurrPcieLinkWidth(cDevice, cCurrLinkWidth)
+	__v := (Return)(__ret)
+	return __v
+}
+
+// nvmlDeviceGetPcieThroughput function as declared in nvml/nvml.h
+func nvmlDeviceGetPcieThroughput(Device Device, Counter PcieUtilCounter, Value *uint32) Return {
+	cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown
+	cCounter, _ := (C.nvmlPcieUtilCounter_t)(Counter), cgoAllocsUnknown
+	cValue, _ := (*C.uint)(unsafe.Pointer(Value)), cgoAllocsUnknown
+	__ret := C.nvmlDeviceGetPcieThroughput(cDevice, cCounter, cValue)
+	__v := (Return)(__ret)
+	return __v
+}
+
+// nvmlDeviceGetPcieReplayCounter function as declared in nvml/nvml.h
+func nvmlDeviceGetPcieReplayCounter(Device Device, Value *uint32) Return {
+	cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown
+	cValue, _ := (*C.uint)(unsafe.Pointer(Value)), cgoAllocsUnknown
+	__ret := C.nvmlDeviceGetPcieReplayCounter(cDevice, cValue)
+	__v := (Return)(__ret)
+	return __v
+}
+
+// nvmlDeviceGetClockInfo function as declared in nvml/nvml.h
+func nvmlDeviceGetClockInfo(Device Device, _type ClockType, Clock *uint32) Return {
+	cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown
+	c_type, _ := (C.nvmlClockType_t)(_type), cgoAllocsUnknown
+	cClock, _ := (*C.uint)(unsafe.Pointer(Clock)), cgoAllocsUnknown
+	__ret := C.nvmlDeviceGetClockInfo(cDevice, c_type, cClock)
+	__v := (Return)(__ret)
+	return __v
+}
+
+// nvmlDeviceGetMaxClockInfo function as declared in nvml/nvml.h
+func nvmlDeviceGetMaxClockInfo(Device Device, _type ClockType, Clock *uint32) Return {
+	cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown
+	c_type, _ := (C.nvmlClockType_t)(_type), cgoAllocsUnknown
+	cClock, _ := (*C.uint)(unsafe.Pointer(Clock)), cgoAllocsUnknown
+	__ret := C.nvmlDeviceGetMaxClockInfo(cDevice, c_type, cClock)
+	__v := (Return)(__ret)
+	return __v
+}
+
+// nvmlDeviceGetApplicationsClock function as declared in nvml/nvml.h
+func nvmlDeviceGetApplicationsClock(Device Device, ClockType ClockType, ClockMHz *uint32) Return {
+	cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown
+	cClockType, _ := (C.nvmlClockType_t)(ClockType), cgoAllocsUnknown
+	cClockMHz, _ := (*C.uint)(unsafe.Pointer(ClockMHz)), cgoAllocsUnknown
+	__ret := C.nvmlDeviceGetApplicationsClock(cDevice, cClockType, cClockMHz)
+	__v := (Return)(__ret)
+	return __v
+}
+
+// nvmlDeviceGetDefaultApplicationsClock function as declared in nvml/nvml.h
+func nvmlDeviceGetDefaultApplicationsClock(Device Device, ClockType ClockType, ClockMHz *uint32) Return {
+	cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown
+	cClockType, _ := (C.nvmlClockType_t)(ClockType), cgoAllocsUnknown
+	cClockMHz, _ := (*C.uint)(unsafe.Pointer(ClockMHz)), cgoAllocsUnknown
+	__ret := C.nvmlDeviceGetDefaultApplicationsClock(cDevice, cClockType, cClockMHz)
+	__v := (Return)(__ret)
+	return __v
+}
+
+// nvmlDeviceResetApplicationsClocks function as declared in nvml/nvml.h
+func 
nvmlDeviceResetApplicationsClocks(Device Device) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceResetApplicationsClocks(cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetClock function as declared in nvml/nvml.h +func nvmlDeviceGetClock(Device Device, ClockType ClockType, ClockId ClockId, ClockMHz *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cClockType, _ := (C.nvmlClockType_t)(ClockType), cgoAllocsUnknown + cClockId, _ := (C.nvmlClockId_t)(ClockId), cgoAllocsUnknown + cClockMHz, _ := (*C.uint)(unsafe.Pointer(ClockMHz)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetClock(cDevice, cClockType, cClockId, cClockMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMaxCustomerBoostClock function as declared in nvml/nvml.h +func nvmlDeviceGetMaxCustomerBoostClock(Device Device, ClockType ClockType, ClockMHz *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cClockType, _ := (C.nvmlClockType_t)(ClockType), cgoAllocsUnknown + cClockMHz, _ := (*C.uint)(unsafe.Pointer(ClockMHz)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMaxCustomerBoostClock(cDevice, cClockType, cClockMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSupportedMemoryClocks function as declared in nvml/nvml.h +func nvmlDeviceGetSupportedMemoryClocks(Device Device, Count *uint32, ClocksMHz *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + cClocksMHz, _ := (*C.uint)(unsafe.Pointer(ClocksMHz)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSupportedMemoryClocks(cDevice, cCount, cClocksMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSupportedGraphicsClocks function as declared in nvml/nvml.h +func nvmlDeviceGetSupportedGraphicsClocks(Device Device, MemoryClockMHz uint32, Count *uint32, ClocksMHz *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMemoryClockMHz, _ := (C.uint)(MemoryClockMHz), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + cClocksMHz, _ := (*C.uint)(unsafe.Pointer(ClocksMHz)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSupportedGraphicsClocks(cDevice, cMemoryClockMHz, cCount, cClocksMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAutoBoostedClocksEnabled function as declared in nvml/nvml.h +func nvmlDeviceGetAutoBoostedClocksEnabled(Device Device, IsEnabled *EnableState, DefaultIsEnabled *EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cIsEnabled, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(IsEnabled)), cgoAllocsUnknown + cDefaultIsEnabled, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(DefaultIsEnabled)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAutoBoostedClocksEnabled(cDevice, cIsEnabled, cDefaultIsEnabled) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetAutoBoostedClocksEnabled function as declared in nvml/nvml.h +func nvmlDeviceSetAutoBoostedClocksEnabled(Device Device, Enabled EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cEnabled, _ := (C.nvmlEnableState_t)(Enabled), cgoAllocsUnknown + __ret := C.nvmlDeviceSetAutoBoostedClocksEnabled(cDevice, cEnabled) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetDefaultAutoBoostedClocksEnabled function as declared in 
nvml/nvml.h +func nvmlDeviceSetDefaultAutoBoostedClocksEnabled(Device Device, Enabled EnableState, Flags uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cEnabled, _ := (C.nvmlEnableState_t)(Enabled), cgoAllocsUnknown + cFlags, _ := (C.uint)(Flags), cgoAllocsUnknown + __ret := C.nvmlDeviceSetDefaultAutoBoostedClocksEnabled(cDevice, cEnabled, cFlags) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetFanSpeed function as declared in nvml/nvml.h +func nvmlDeviceGetFanSpeed(Device Device, Speed *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cSpeed, _ := (*C.uint)(unsafe.Pointer(Speed)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetFanSpeed(cDevice, cSpeed) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetFanSpeed_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetFanSpeed_v2(Device Device, Fan uint32, Speed *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cFan, _ := (C.uint)(Fan), cgoAllocsUnknown + cSpeed, _ := (*C.uint)(unsafe.Pointer(Speed)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetFanSpeed_v2(cDevice, cFan, cSpeed) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetTargetFanSpeed function as declared in nvml/nvml.h +func nvmlDeviceGetTargetFanSpeed(Device Device, Fan uint32, TargetSpeed *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cFan, _ := (C.uint)(Fan), cgoAllocsUnknown + cTargetSpeed, _ := (*C.uint)(unsafe.Pointer(TargetSpeed)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetTargetFanSpeed(cDevice, cFan, cTargetSpeed) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetDefaultFanSpeed_v2 function as declared in nvml/nvml.h +func nvmlDeviceSetDefaultFanSpeed_v2(Device Device, Fan uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cFan, _ := (C.uint)(Fan), cgoAllocsUnknown + __ret := C.nvmlDeviceSetDefaultFanSpeed_v2(cDevice, cFan) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMinMaxFanSpeed function as declared in nvml/nvml.h +func nvmlDeviceGetMinMaxFanSpeed(Device Device, MinSpeed *uint32, MaxSpeed *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMinSpeed, _ := (*C.uint)(unsafe.Pointer(MinSpeed)), cgoAllocsUnknown + cMaxSpeed, _ := (*C.uint)(unsafe.Pointer(MaxSpeed)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMinMaxFanSpeed(cDevice, cMinSpeed, cMaxSpeed) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetFanControlPolicy_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetFanControlPolicy_v2(Device Device, Fan uint32, Policy *FanControlPolicy) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cFan, _ := (C.uint)(Fan), cgoAllocsUnknown + cPolicy, _ := (*C.nvmlFanControlPolicy_t)(unsafe.Pointer(Policy)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetFanControlPolicy_v2(cDevice, cFan, cPolicy) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetFanControlPolicy function as declared in nvml/nvml.h +func nvmlDeviceSetFanControlPolicy(Device Device, Fan uint32, Policy FanControlPolicy) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cFan, _ := (C.uint)(Fan), cgoAllocsUnknown + cPolicy, _ := (C.nvmlFanControlPolicy_t)(Policy), cgoAllocsUnknown + __ret := C.nvmlDeviceSetFanControlPolicy(cDevice, cFan, cPolicy) + __v := (Return)(__ret) + return __v 
+} + +// nvmlDeviceGetNumFans function as declared in nvml/nvml.h +func nvmlDeviceGetNumFans(Device Device, NumFans *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cNumFans, _ := (*C.uint)(unsafe.Pointer(NumFans)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNumFans(cDevice, cNumFans) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetTemperature function as declared in nvml/nvml.h +func nvmlDeviceGetTemperature(Device Device, SensorType TemperatureSensors, Temp *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cSensorType, _ := (C.nvmlTemperatureSensors_t)(SensorType), cgoAllocsUnknown + cTemp, _ := (*C.uint)(unsafe.Pointer(Temp)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetTemperature(cDevice, cSensorType, cTemp) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetTemperatureThreshold function as declared in nvml/nvml.h +func nvmlDeviceGetTemperatureThreshold(Device Device, ThresholdType TemperatureThresholds, Temp *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cThresholdType, _ := (C.nvmlTemperatureThresholds_t)(ThresholdType), cgoAllocsUnknown + cTemp, _ := (*C.uint)(unsafe.Pointer(Temp)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetTemperatureThreshold(cDevice, cThresholdType, cTemp) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetTemperatureThreshold function as declared in nvml/nvml.h +func nvmlDeviceSetTemperatureThreshold(Device Device, ThresholdType TemperatureThresholds, Temp *int32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cThresholdType, _ := (C.nvmlTemperatureThresholds_t)(ThresholdType), cgoAllocsUnknown + cTemp, _ := (*C.int)(unsafe.Pointer(Temp)), cgoAllocsUnknown + __ret := C.nvmlDeviceSetTemperatureThreshold(cDevice, cThresholdType, cTemp) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetThermalSettings function as declared in nvml/nvml.h +func nvmlDeviceGetThermalSettings(Device Device, SensorIndex uint32, PThermalSettings *GpuThermalSettings) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cSensorIndex, _ := (C.uint)(SensorIndex), cgoAllocsUnknown + cPThermalSettings, _ := (*C.nvmlGpuThermalSettings_t)(unsafe.Pointer(PThermalSettings)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetThermalSettings(cDevice, cSensorIndex, cPThermalSettings) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPerformanceState function as declared in nvml/nvml.h +func nvmlDeviceGetPerformanceState(Device Device, PState *Pstates) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPState, _ := (*C.nvmlPstates_t)(unsafe.Pointer(PState)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPerformanceState(cDevice, cPState) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCurrentClocksThrottleReasons function as declared in nvml/nvml.h +func nvmlDeviceGetCurrentClocksThrottleReasons(Device Device, ClocksThrottleReasons *uint64) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cClocksThrottleReasons, _ := (*C.ulonglong)(unsafe.Pointer(ClocksThrottleReasons)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCurrentClocksThrottleReasons(cDevice, cClocksThrottleReasons) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSupportedClocksThrottleReasons function as declared in nvml/nvml.h +func 
nvmlDeviceGetSupportedClocksThrottleReasons(Device Device, SupportedClocksThrottleReasons *uint64) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cSupportedClocksThrottleReasons, _ := (*C.ulonglong)(unsafe.Pointer(SupportedClocksThrottleReasons)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSupportedClocksThrottleReasons(cDevice, cSupportedClocksThrottleReasons) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPowerState function as declared in nvml/nvml.h +func nvmlDeviceGetPowerState(Device Device, PState *Pstates) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPState, _ := (*C.nvmlPstates_t)(unsafe.Pointer(PState)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPowerState(cDevice, cPState) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPowerManagementMode function as declared in nvml/nvml.h +func nvmlDeviceGetPowerManagementMode(Device Device, Mode *EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMode, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(Mode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPowerManagementMode(cDevice, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPowerManagementLimit function as declared in nvml/nvml.h +func nvmlDeviceGetPowerManagementLimit(Device Device, Limit *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLimit, _ := (*C.uint)(unsafe.Pointer(Limit)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPowerManagementLimit(cDevice, cLimit) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPowerManagementLimitConstraints function as declared in nvml/nvml.h +func nvmlDeviceGetPowerManagementLimitConstraints(Device Device, MinLimit *uint32, MaxLimit *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMinLimit, _ := (*C.uint)(unsafe.Pointer(MinLimit)), cgoAllocsUnknown + cMaxLimit, _ := (*C.uint)(unsafe.Pointer(MaxLimit)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPowerManagementLimitConstraints(cDevice, cMinLimit, cMaxLimit) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPowerManagementDefaultLimit function as declared in nvml/nvml.h +func nvmlDeviceGetPowerManagementDefaultLimit(Device Device, DefaultLimit *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cDefaultLimit, _ := (*C.uint)(unsafe.Pointer(DefaultLimit)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPowerManagementDefaultLimit(cDevice, cDefaultLimit) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPowerUsage function as declared in nvml/nvml.h +func nvmlDeviceGetPowerUsage(Device Device, Power *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPower, _ := (*C.uint)(unsafe.Pointer(Power)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPowerUsage(cDevice, cPower) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetTotalEnergyConsumption function as declared in nvml/nvml.h +func nvmlDeviceGetTotalEnergyConsumption(Device Device, Energy *uint64) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cEnergy, _ := (*C.ulonglong)(unsafe.Pointer(Energy)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetTotalEnergyConsumption(cDevice, cEnergy) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetEnforcedPowerLimit function as declared in nvml/nvml.h +func 
nvmlDeviceGetEnforcedPowerLimit(Device Device, Limit *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLimit, _ := (*C.uint)(unsafe.Pointer(Limit)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetEnforcedPowerLimit(cDevice, cLimit) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuOperationMode function as declared in nvml/nvml.h +func nvmlDeviceGetGpuOperationMode(Device Device, Current *GpuOperationMode, Pending *GpuOperationMode) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCurrent, _ := (*C.nvmlGpuOperationMode_t)(unsafe.Pointer(Current)), cgoAllocsUnknown + cPending, _ := (*C.nvmlGpuOperationMode_t)(unsafe.Pointer(Pending)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuOperationMode(cDevice, cCurrent, cPending) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMemoryInfo function as declared in nvml/nvml.h +func nvmlDeviceGetMemoryInfo(Device Device, Memory *Memory) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMemory, _ := (*C.nvmlMemory_t)(unsafe.Pointer(Memory)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMemoryInfo(cDevice, cMemory) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMemoryInfo_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetMemoryInfo_v2(Device Device, Memory *Memory_v2) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMemory, _ := (*C.nvmlMemory_v2_t)(unsafe.Pointer(Memory)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMemoryInfo_v2(cDevice, cMemory) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetComputeMode function as declared in nvml/nvml.h +func nvmlDeviceGetComputeMode(Device Device, Mode *ComputeMode) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMode, _ := (*C.nvmlComputeMode_t)(unsafe.Pointer(Mode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetComputeMode(cDevice, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCudaComputeCapability function as declared in nvml/nvml.h +func nvmlDeviceGetCudaComputeCapability(Device Device, Major *int32, Minor *int32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMajor, _ := (*C.int)(unsafe.Pointer(Major)), cgoAllocsUnknown + cMinor, _ := (*C.int)(unsafe.Pointer(Minor)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCudaComputeCapability(cDevice, cMajor, cMinor) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetEccMode function as declared in nvml/nvml.h +func nvmlDeviceGetEccMode(Device Device, Current *EnableState, Pending *EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCurrent, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(Current)), cgoAllocsUnknown + cPending, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(Pending)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetEccMode(cDevice, cCurrent, cPending) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetDefaultEccMode function as declared in nvml/nvml.h +func nvmlDeviceGetDefaultEccMode(Device Device, DefaultMode *EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cDefaultMode, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(DefaultMode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDefaultEccMode(cDevice, cDefaultMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetBoardId function as declared in nvml/nvml.h +func 
nvmlDeviceGetBoardId(Device Device, BoardId *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cBoardId, _ := (*C.uint)(unsafe.Pointer(BoardId)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetBoardId(cDevice, cBoardId) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMultiGpuBoard function as declared in nvml/nvml.h +func nvmlDeviceGetMultiGpuBoard(Device Device, MultiGpuBool *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMultiGpuBool, _ := (*C.uint)(unsafe.Pointer(MultiGpuBool)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMultiGpuBoard(cDevice, cMultiGpuBool) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetTotalEccErrors function as declared in nvml/nvml.h +func nvmlDeviceGetTotalEccErrors(Device Device, ErrorType MemoryErrorType, CounterType EccCounterType, EccCounts *uint64) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cErrorType, _ := (C.nvmlMemoryErrorType_t)(ErrorType), cgoAllocsUnknown + cCounterType, _ := (C.nvmlEccCounterType_t)(CounterType), cgoAllocsUnknown + cEccCounts, _ := (*C.ulonglong)(unsafe.Pointer(EccCounts)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetTotalEccErrors(cDevice, cErrorType, cCounterType, cEccCounts) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetDetailedEccErrors function as declared in nvml/nvml.h +func nvmlDeviceGetDetailedEccErrors(Device Device, ErrorType MemoryErrorType, CounterType EccCounterType, EccCounts *EccErrorCounts) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cErrorType, _ := (C.nvmlMemoryErrorType_t)(ErrorType), cgoAllocsUnknown + cCounterType, _ := (C.nvmlEccCounterType_t)(CounterType), cgoAllocsUnknown + cEccCounts, _ := (*C.nvmlEccErrorCounts_t)(unsafe.Pointer(EccCounts)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDetailedEccErrors(cDevice, cErrorType, cCounterType, cEccCounts) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMemoryErrorCounter function as declared in nvml/nvml.h +func nvmlDeviceGetMemoryErrorCounter(Device Device, ErrorType MemoryErrorType, CounterType EccCounterType, LocationType MemoryLocation, Count *uint64) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cErrorType, _ := (C.nvmlMemoryErrorType_t)(ErrorType), cgoAllocsUnknown + cCounterType, _ := (C.nvmlEccCounterType_t)(CounterType), cgoAllocsUnknown + cLocationType, _ := (C.nvmlMemoryLocation_t)(LocationType), cgoAllocsUnknown + cCount, _ := (*C.ulonglong)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMemoryErrorCounter(cDevice, cErrorType, cCounterType, cLocationType, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetUtilizationRates function as declared in nvml/nvml.h +func nvmlDeviceGetUtilizationRates(Device Device, Utilization *Utilization) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cUtilization, _ := (*C.nvmlUtilization_t)(unsafe.Pointer(Utilization)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetUtilizationRates(cDevice, cUtilization) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetEncoderUtilization function as declared in nvml/nvml.h +func nvmlDeviceGetEncoderUtilization(Device Device, Utilization *uint32, SamplingPeriodUs *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cUtilization, _ := (*C.uint)(unsafe.Pointer(Utilization)), cgoAllocsUnknown + 
cSamplingPeriodUs, _ := (*C.uint)(unsafe.Pointer(SamplingPeriodUs)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetEncoderUtilization(cDevice, cUtilization, cSamplingPeriodUs) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetEncoderCapacity function as declared in nvml/nvml.h +func nvmlDeviceGetEncoderCapacity(Device Device, EncoderQueryType EncoderType, EncoderCapacity *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cEncoderQueryType, _ := (C.nvmlEncoderType_t)(EncoderQueryType), cgoAllocsUnknown + cEncoderCapacity, _ := (*C.uint)(unsafe.Pointer(EncoderCapacity)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetEncoderCapacity(cDevice, cEncoderQueryType, cEncoderCapacity) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetEncoderStats function as declared in nvml/nvml.h +func nvmlDeviceGetEncoderStats(Device Device, SessionCount *uint32, AverageFps *uint32, AverageLatency *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cSessionCount, _ := (*C.uint)(unsafe.Pointer(SessionCount)), cgoAllocsUnknown + cAverageFps, _ := (*C.uint)(unsafe.Pointer(AverageFps)), cgoAllocsUnknown + cAverageLatency, _ := (*C.uint)(unsafe.Pointer(AverageLatency)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetEncoderStats(cDevice, cSessionCount, cAverageFps, cAverageLatency) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetEncoderSessions function as declared in nvml/nvml.h +func nvmlDeviceGetEncoderSessions(Device Device, SessionCount *uint32, SessionInfos *EncoderSessionInfo) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cSessionCount, _ := (*C.uint)(unsafe.Pointer(SessionCount)), cgoAllocsUnknown + cSessionInfos, _ := (*C.nvmlEncoderSessionInfo_t)(unsafe.Pointer(SessionInfos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetEncoderSessions(cDevice, cSessionCount, cSessionInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetDecoderUtilization function as declared in nvml/nvml.h +func nvmlDeviceGetDecoderUtilization(Device Device, Utilization *uint32, SamplingPeriodUs *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cUtilization, _ := (*C.uint)(unsafe.Pointer(Utilization)), cgoAllocsUnknown + cSamplingPeriodUs, _ := (*C.uint)(unsafe.Pointer(SamplingPeriodUs)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDecoderUtilization(cDevice, cUtilization, cSamplingPeriodUs) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetFBCStats function as declared in nvml/nvml.h +func nvmlDeviceGetFBCStats(Device Device, FbcStats *FBCStats) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cFbcStats, _ := (*C.nvmlFBCStats_t)(unsafe.Pointer(FbcStats)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetFBCStats(cDevice, cFbcStats) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetFBCSessions function as declared in nvml/nvml.h +func nvmlDeviceGetFBCSessions(Device Device, SessionCount *uint32, SessionInfo *FBCSessionInfo) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cSessionCount, _ := (*C.uint)(unsafe.Pointer(SessionCount)), cgoAllocsUnknown + cSessionInfo, _ := (*C.nvmlFBCSessionInfo_t)(unsafe.Pointer(SessionInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetFBCSessions(cDevice, cSessionCount, cSessionInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetDriverModel function as declared in nvml/nvml.h +func 
nvmlDeviceGetDriverModel(Device Device, Current *DriverModel, Pending *DriverModel) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCurrent, _ := (*C.nvmlDriverModel_t)(unsafe.Pointer(Current)), cgoAllocsUnknown + cPending, _ := (*C.nvmlDriverModel_t)(unsafe.Pointer(Pending)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDriverModel(cDevice, cCurrent, cPending) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVbiosVersion function as declared in nvml/nvml.h +func nvmlDeviceGetVbiosVersion(Device Device, Version *byte, Length uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cVersion, _ := (*C.char)(unsafe.Pointer(Version)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVbiosVersion(cDevice, cVersion, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetBridgeChipInfo function as declared in nvml/nvml.h +func nvmlDeviceGetBridgeChipInfo(Device Device, BridgeHierarchy *BridgeChipHierarchy) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cBridgeHierarchy, _ := (*C.nvmlBridgeChipHierarchy_t)(unsafe.Pointer(BridgeHierarchy)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetBridgeChipInfo(cDevice, cBridgeHierarchy) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetComputeRunningProcesses_v3 function as declared in nvml/nvml.h +func nvmlDeviceGetComputeRunningProcesses_v3(Device Device, InfoCount *uint32, Infos *ProcessInfo) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetComputeRunningProcesses_v3(cDevice, cInfoCount, cInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGraphicsRunningProcesses_v3 function as declared in nvml/nvml.h +func nvmlDeviceGetGraphicsRunningProcesses_v3(Device Device, InfoCount *uint32, Infos *ProcessInfo) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGraphicsRunningProcesses_v3(cDevice, cInfoCount, cInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMPSComputeRunningProcesses_v3 function as declared in nvml/nvml.h +func nvmlDeviceGetMPSComputeRunningProcesses_v3(Device Device, InfoCount *uint32, Infos *ProcessInfo) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMPSComputeRunningProcesses_v3(cDevice, cInfoCount, cInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceOnSameBoard function as declared in nvml/nvml.h +func nvmlDeviceOnSameBoard(Device1 Device, Device2 Device, OnSameBoard *int32) Return { + cDevice1, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device1)), cgoAllocsUnknown + cDevice2, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device2)), cgoAllocsUnknown + cOnSameBoard, _ := (*C.int)(unsafe.Pointer(OnSameBoard)), cgoAllocsUnknown + __ret := C.nvmlDeviceOnSameBoard(cDevice1, cDevice2, cOnSameBoard) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAPIRestriction 
function as declared in nvml/nvml.h +func nvmlDeviceGetAPIRestriction(Device Device, ApiType RestrictedAPI, IsRestricted *EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cApiType, _ := (C.nvmlRestrictedAPI_t)(ApiType), cgoAllocsUnknown + cIsRestricted, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(IsRestricted)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAPIRestriction(cDevice, cApiType, cIsRestricted) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSamples function as declared in nvml/nvml.h +func nvmlDeviceGetSamples(Device Device, _type SamplingType, LastSeenTimeStamp uint64, SampleValType *ValueType, SampleCount *uint32, Samples *Sample) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + c_type, _ := (C.nvmlSamplingType_t)(_type), cgoAllocsUnknown + cLastSeenTimeStamp, _ := (C.ulonglong)(LastSeenTimeStamp), cgoAllocsUnknown + cSampleValType, _ := (*C.nvmlValueType_t)(unsafe.Pointer(SampleValType)), cgoAllocsUnknown + cSampleCount, _ := (*C.uint)(unsafe.Pointer(SampleCount)), cgoAllocsUnknown + cSamples, _ := (*C.nvmlSample_t)(unsafe.Pointer(Samples)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSamples(cDevice, c_type, cLastSeenTimeStamp, cSampleValType, cSampleCount, cSamples) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetBAR1MemoryInfo function as declared in nvml/nvml.h +func nvmlDeviceGetBAR1MemoryInfo(Device Device, Bar1Memory *BAR1Memory) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cBar1Memory, _ := (*C.nvmlBAR1Memory_t)(unsafe.Pointer(Bar1Memory)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetBAR1MemoryInfo(cDevice, cBar1Memory) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetViolationStatus function as declared in nvml/nvml.h +func nvmlDeviceGetViolationStatus(Device Device, PerfPolicyType PerfPolicyType, ViolTime *ViolationTime) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPerfPolicyType, _ := (C.nvmlPerfPolicyType_t)(PerfPolicyType), cgoAllocsUnknown + cViolTime, _ := (*C.nvmlViolationTime_t)(unsafe.Pointer(ViolTime)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetViolationStatus(cDevice, cPerfPolicyType, cViolTime) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetIrqNum function as declared in nvml/nvml.h +func nvmlDeviceGetIrqNum(Device Device, IrqNum *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cIrqNum, _ := (*C.uint)(unsafe.Pointer(IrqNum)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetIrqNum(cDevice, cIrqNum) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNumGpuCores function as declared in nvml/nvml.h +func nvmlDeviceGetNumGpuCores(Device Device, NumCores *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cNumCores, _ := (*C.uint)(unsafe.Pointer(NumCores)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNumGpuCores(cDevice, cNumCores) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPowerSource function as declared in nvml/nvml.h +func nvmlDeviceGetPowerSource(Device Device, PowerSource *PowerSource) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPowerSource, _ := (*C.nvmlPowerSource_t)(unsafe.Pointer(PowerSource)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPowerSource(cDevice, cPowerSource) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMemoryBusWidth function as declared in 
nvml/nvml.h +func nvmlDeviceGetMemoryBusWidth(Device Device, BusWidth *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cBusWidth, _ := (*C.uint)(unsafe.Pointer(BusWidth)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMemoryBusWidth(cDevice, cBusWidth) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPcieLinkMaxSpeed function as declared in nvml/nvml.h +func nvmlDeviceGetPcieLinkMaxSpeed(Device Device, MaxSpeed *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMaxSpeed, _ := (*C.uint)(unsafe.Pointer(MaxSpeed)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPcieLinkMaxSpeed(cDevice, cMaxSpeed) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPcieSpeed function as declared in nvml/nvml.h +func nvmlDeviceGetPcieSpeed(Device Device, PcieSpeed *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPcieSpeed, _ := (*C.uint)(unsafe.Pointer(PcieSpeed)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPcieSpeed(cDevice, cPcieSpeed) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAdaptiveClockInfoStatus function as declared in nvml/nvml.h +func nvmlDeviceGetAdaptiveClockInfoStatus(Device Device, AdaptiveClockStatus *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cAdaptiveClockStatus, _ := (*C.uint)(unsafe.Pointer(AdaptiveClockStatus)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAdaptiveClockInfoStatus(cDevice, cAdaptiveClockStatus) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAccountingMode function as declared in nvml/nvml.h +func nvmlDeviceGetAccountingMode(Device Device, Mode *EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMode, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(Mode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAccountingMode(cDevice, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAccountingStats function as declared in nvml/nvml.h +func nvmlDeviceGetAccountingStats(Device Device, Pid uint32, Stats *AccountingStats) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPid, _ := (C.uint)(Pid), cgoAllocsUnknown + cStats, _ := (*C.nvmlAccountingStats_t)(unsafe.Pointer(Stats)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAccountingStats(cDevice, cPid, cStats) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAccountingPids function as declared in nvml/nvml.h +func nvmlDeviceGetAccountingPids(Device Device, Count *uint32, Pids *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + cPids, _ := (*C.uint)(unsafe.Pointer(Pids)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAccountingPids(cDevice, cCount, cPids) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAccountingBufferSize function as declared in nvml/nvml.h +func nvmlDeviceGetAccountingBufferSize(Device Device, BufferSize *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cBufferSize, _ := (*C.uint)(unsafe.Pointer(BufferSize)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAccountingBufferSize(cDevice, cBufferSize) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetRetiredPages function as declared in nvml/nvml.h +func nvmlDeviceGetRetiredPages(Device Device, Cause PageRetirementCause, PageCount *uint32, Addresses *uint64) 
Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCause, _ := (C.nvmlPageRetirementCause_t)(Cause), cgoAllocsUnknown + cPageCount, _ := (*C.uint)(unsafe.Pointer(PageCount)), cgoAllocsUnknown + cAddresses, _ := (*C.ulonglong)(unsafe.Pointer(Addresses)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetRetiredPages(cDevice, cCause, cPageCount, cAddresses) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetRetiredPages_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetRetiredPages_v2(Device Device, Cause PageRetirementCause, PageCount *uint32, Addresses *uint64, Timestamps *uint64) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCause, _ := (C.nvmlPageRetirementCause_t)(Cause), cgoAllocsUnknown + cPageCount, _ := (*C.uint)(unsafe.Pointer(PageCount)), cgoAllocsUnknown + cAddresses, _ := (*C.ulonglong)(unsafe.Pointer(Addresses)), cgoAllocsUnknown + cTimestamps, _ := (*C.ulonglong)(unsafe.Pointer(Timestamps)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetRetiredPages_v2(cDevice, cCause, cPageCount, cAddresses, cTimestamps) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetRetiredPagesPendingStatus function as declared in nvml/nvml.h +func nvmlDeviceGetRetiredPagesPendingStatus(Device Device, IsPending *EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cIsPending, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(IsPending)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetRetiredPagesPendingStatus(cDevice, cIsPending) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetRemappedRows function as declared in nvml/nvml.h +func nvmlDeviceGetRemappedRows(Device Device, CorrRows *uint32, UncRows *uint32, IsPending *uint32, FailureOccurred *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCorrRows, _ := (*C.uint)(unsafe.Pointer(CorrRows)), cgoAllocsUnknown + cUncRows, _ := (*C.uint)(unsafe.Pointer(UncRows)), cgoAllocsUnknown + cIsPending, _ := (*C.uint)(unsafe.Pointer(IsPending)), cgoAllocsUnknown + cFailureOccurred, _ := (*C.uint)(unsafe.Pointer(FailureOccurred)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetRemappedRows(cDevice, cCorrRows, cUncRows, cIsPending, cFailureOccurred) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetRowRemapperHistogram function as declared in nvml/nvml.h +func nvmlDeviceGetRowRemapperHistogram(Device Device, Values *RowRemapperHistogramValues) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cValues, _ := (*C.nvmlRowRemapperHistogramValues_t)(unsafe.Pointer(Values)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetRowRemapperHistogram(cDevice, cValues) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetArchitecture function as declared in nvml/nvml.h +func nvmlDeviceGetArchitecture(Device Device, Arch *DeviceArchitecture) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cArch, _ := (*C.nvmlDeviceArchitecture_t)(unsafe.Pointer(Arch)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetArchitecture(cDevice, cArch) + __v := (Return)(__ret) + return __v +} + +// nvmlUnitSetLedState function as declared in nvml/nvml.h +func nvmlUnitSetLedState(Unit Unit, Color LedColor) Return { + cUnit, _ := *(*C.nvmlUnit_t)(unsafe.Pointer(&Unit)), cgoAllocsUnknown + cColor, _ := (C.nvmlLedColor_t)(Color), cgoAllocsUnknown + __ret := C.nvmlUnitSetLedState(cUnit, cColor) + __v := (Return)(__ret) + return __v +} + +// 
nvmlDeviceSetPersistenceMode function as declared in nvml/nvml.h +func nvmlDeviceSetPersistenceMode(Device Device, Mode EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMode, _ := (C.nvmlEnableState_t)(Mode), cgoAllocsUnknown + __ret := C.nvmlDeviceSetPersistenceMode(cDevice, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetComputeMode function as declared in nvml/nvml.h +func nvmlDeviceSetComputeMode(Device Device, Mode ComputeMode) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMode, _ := (C.nvmlComputeMode_t)(Mode), cgoAllocsUnknown + __ret := C.nvmlDeviceSetComputeMode(cDevice, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetEccMode function as declared in nvml/nvml.h +func nvmlDeviceSetEccMode(Device Device, Ecc EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cEcc, _ := (C.nvmlEnableState_t)(Ecc), cgoAllocsUnknown + __ret := C.nvmlDeviceSetEccMode(cDevice, cEcc) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceClearEccErrorCounts function as declared in nvml/nvml.h +func nvmlDeviceClearEccErrorCounts(Device Device, CounterType EccCounterType) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCounterType, _ := (C.nvmlEccCounterType_t)(CounterType), cgoAllocsUnknown + __ret := C.nvmlDeviceClearEccErrorCounts(cDevice, cCounterType) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetDriverModel function as declared in nvml/nvml.h +func nvmlDeviceSetDriverModel(Device Device, DriverModel DriverModel, Flags uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cDriverModel, _ := (C.nvmlDriverModel_t)(DriverModel), cgoAllocsUnknown + cFlags, _ := (C.uint)(Flags), cgoAllocsUnknown + __ret := C.nvmlDeviceSetDriverModel(cDevice, cDriverModel, cFlags) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetGpuLockedClocks function as declared in nvml/nvml.h +func nvmlDeviceSetGpuLockedClocks(Device Device, MinGpuClockMHz uint32, MaxGpuClockMHz uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMinGpuClockMHz, _ := (C.uint)(MinGpuClockMHz), cgoAllocsUnknown + cMaxGpuClockMHz, _ := (C.uint)(MaxGpuClockMHz), cgoAllocsUnknown + __ret := C.nvmlDeviceSetGpuLockedClocks(cDevice, cMinGpuClockMHz, cMaxGpuClockMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceResetGpuLockedClocks function as declared in nvml/nvml.h +func nvmlDeviceResetGpuLockedClocks(Device Device) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceResetGpuLockedClocks(cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetMemoryLockedClocks function as declared in nvml/nvml.h +func nvmlDeviceSetMemoryLockedClocks(Device Device, MinMemClockMHz uint32, MaxMemClockMHz uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMinMemClockMHz, _ := (C.uint)(MinMemClockMHz), cgoAllocsUnknown + cMaxMemClockMHz, _ := (C.uint)(MaxMemClockMHz), cgoAllocsUnknown + __ret := C.nvmlDeviceSetMemoryLockedClocks(cDevice, cMinMemClockMHz, cMaxMemClockMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceResetMemoryLockedClocks function as declared in nvml/nvml.h +func nvmlDeviceResetMemoryLockedClocks(Device Device) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), 
cgoAllocsUnknown + __ret := C.nvmlDeviceResetMemoryLockedClocks(cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetApplicationsClocks function as declared in nvml/nvml.h +func nvmlDeviceSetApplicationsClocks(Device Device, MemClockMHz uint32, GraphicsClockMHz uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMemClockMHz, _ := (C.uint)(MemClockMHz), cgoAllocsUnknown + cGraphicsClockMHz, _ := (C.uint)(GraphicsClockMHz), cgoAllocsUnknown + __ret := C.nvmlDeviceSetApplicationsClocks(cDevice, cMemClockMHz, cGraphicsClockMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetClkMonStatus function as declared in nvml/nvml.h +func nvmlDeviceGetClkMonStatus(Device Device, Status *ClkMonStatus) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cStatus, _ := (*C.nvmlClkMonStatus_t)(unsafe.Pointer(Status)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetClkMonStatus(cDevice, cStatus) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetPowerManagementLimit function as declared in nvml/nvml.h +func nvmlDeviceSetPowerManagementLimit(Device Device, Limit uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLimit, _ := (C.uint)(Limit), cgoAllocsUnknown + __ret := C.nvmlDeviceSetPowerManagementLimit(cDevice, cLimit) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetGpuOperationMode function as declared in nvml/nvml.h +func nvmlDeviceSetGpuOperationMode(Device Device, Mode GpuOperationMode) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMode, _ := (C.nvmlGpuOperationMode_t)(Mode), cgoAllocsUnknown + __ret := C.nvmlDeviceSetGpuOperationMode(cDevice, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetAPIRestriction function as declared in nvml/nvml.h +func nvmlDeviceSetAPIRestriction(Device Device, ApiType RestrictedAPI, IsRestricted EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cApiType, _ := (C.nvmlRestrictedAPI_t)(ApiType), cgoAllocsUnknown + cIsRestricted, _ := (C.nvmlEnableState_t)(IsRestricted), cgoAllocsUnknown + __ret := C.nvmlDeviceSetAPIRestriction(cDevice, cApiType, cIsRestricted) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetAccountingMode function as declared in nvml/nvml.h +func nvmlDeviceSetAccountingMode(Device Device, Mode EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMode, _ := (C.nvmlEnableState_t)(Mode), cgoAllocsUnknown + __ret := C.nvmlDeviceSetAccountingMode(cDevice, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceClearAccountingPids function as declared in nvml/nvml.h +func nvmlDeviceClearAccountingPids(Device Device) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceClearAccountingPids(cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkState function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkState(Device Device, Link uint32, IsActive *EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cIsActive, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(IsActive)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkState(cDevice, cLink, cIsActive) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkVersion function as declared in 
nvml/nvml.h +func nvmlDeviceGetNvLinkVersion(Device Device, Link uint32, Version *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cVersion, _ := (*C.uint)(unsafe.Pointer(Version)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkVersion(cDevice, cLink, cVersion) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkCapability function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkCapability(Device Device, Link uint32, Capability NvLinkCapability, CapResult *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cCapability, _ := (C.nvmlNvLinkCapability_t)(Capability), cgoAllocsUnknown + cCapResult, _ := (*C.uint)(unsafe.Pointer(CapResult)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkCapability(cDevice, cLink, cCapability, cCapResult) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkRemotePciInfo_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkRemotePciInfo_v2(Device Device, Link uint32, Pci *PciInfo) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cPci, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(Pci)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkRemotePciInfo_v2(cDevice, cLink, cPci) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkErrorCounter function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkErrorCounter(Device Device, Link uint32, Counter NvLinkErrorCounter, CounterValue *uint64) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cCounter, _ := (C.nvmlNvLinkErrorCounter_t)(Counter), cgoAllocsUnknown + cCounterValue, _ := (*C.ulonglong)(unsafe.Pointer(CounterValue)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkErrorCounter(cDevice, cLink, cCounter, cCounterValue) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceResetNvLinkErrorCounters function as declared in nvml/nvml.h +func nvmlDeviceResetNvLinkErrorCounters(Device Device, Link uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + __ret := C.nvmlDeviceResetNvLinkErrorCounters(cDevice, cLink) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetNvLinkUtilizationControl function as declared in nvml/nvml.h +func nvmlDeviceSetNvLinkUtilizationControl(Device Device, Link uint32, Counter uint32, Control *NvLinkUtilizationControl, Reset uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cCounter, _ := (C.uint)(Counter), cgoAllocsUnknown + cControl, _ := (*C.nvmlNvLinkUtilizationControl_t)(unsafe.Pointer(Control)), cgoAllocsUnknown + cReset, _ := (C.uint)(Reset), cgoAllocsUnknown + __ret := C.nvmlDeviceSetNvLinkUtilizationControl(cDevice, cLink, cCounter, cControl, cReset) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkUtilizationControl function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkUtilizationControl(Device Device, Link uint32, Counter uint32, Control *NvLinkUtilizationControl) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cCounter, _ := (C.uint)(Counter), cgoAllocsUnknown + cControl, 
_ := (*C.nvmlNvLinkUtilizationControl_t)(unsafe.Pointer(Control)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkUtilizationControl(cDevice, cLink, cCounter, cControl) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkUtilizationCounter function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkUtilizationCounter(Device Device, Link uint32, Counter uint32, Rxcounter *uint64, Txcounter *uint64) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cCounter, _ := (C.uint)(Counter), cgoAllocsUnknown + cRxcounter, _ := (*C.ulonglong)(unsafe.Pointer(Rxcounter)), cgoAllocsUnknown + cTxcounter, _ := (*C.ulonglong)(unsafe.Pointer(Txcounter)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkUtilizationCounter(cDevice, cLink, cCounter, cRxcounter, cTxcounter) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceFreezeNvLinkUtilizationCounter function as declared in nvml/nvml.h +func nvmlDeviceFreezeNvLinkUtilizationCounter(Device Device, Link uint32, Counter uint32, Freeze EnableState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cCounter, _ := (C.uint)(Counter), cgoAllocsUnknown + cFreeze, _ := (C.nvmlEnableState_t)(Freeze), cgoAllocsUnknown + __ret := C.nvmlDeviceFreezeNvLinkUtilizationCounter(cDevice, cLink, cCounter, cFreeze) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceResetNvLinkUtilizationCounter function as declared in nvml/nvml.h +func nvmlDeviceResetNvLinkUtilizationCounter(Device Device, Link uint32, Counter uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cCounter, _ := (C.uint)(Counter), cgoAllocsUnknown + __ret := C.nvmlDeviceResetNvLinkUtilizationCounter(cDevice, cLink, cCounter) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkRemoteDeviceType function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkRemoteDeviceType(Device Device, Link uint32, PNvLinkDeviceType *IntNvLinkDeviceType) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cPNvLinkDeviceType, _ := (*C.nvmlIntNvLinkDeviceType_t)(unsafe.Pointer(PNvLinkDeviceType)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkRemoteDeviceType(cDevice, cLink, cPNvLinkDeviceType) + __v := (Return)(__ret) + return __v +} + +// nvmlEventSetCreate function as declared in nvml/nvml.h +func nvmlEventSetCreate(Set *EventSet) Return { + cSet, _ := (*C.nvmlEventSet_t)(unsafe.Pointer(Set)), cgoAllocsUnknown + __ret := C.nvmlEventSetCreate(cSet) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceRegisterEvents function as declared in nvml/nvml.h +func nvmlDeviceRegisterEvents(Device Device, EventTypes uint64, Set EventSet) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cEventTypes, _ := (C.ulonglong)(EventTypes), cgoAllocsUnknown + cSet, _ := *(*C.nvmlEventSet_t)(unsafe.Pointer(&Set)), cgoAllocsUnknown + __ret := C.nvmlDeviceRegisterEvents(cDevice, cEventTypes, cSet) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSupportedEventTypes function as declared in nvml/nvml.h +func nvmlDeviceGetSupportedEventTypes(Device Device, EventTypes *uint64) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cEventTypes, _ := 
(*C.ulonglong)(unsafe.Pointer(EventTypes)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSupportedEventTypes(cDevice, cEventTypes) + __v := (Return)(__ret) + return __v +} + +// nvmlEventSetWait_v2 function as declared in nvml/nvml.h +func nvmlEventSetWait_v2(Set EventSet, Data *EventData, Timeoutms uint32) Return { + cSet, _ := *(*C.nvmlEventSet_t)(unsafe.Pointer(&Set)), cgoAllocsUnknown + cData, _ := (*C.nvmlEventData_t)(unsafe.Pointer(Data)), cgoAllocsUnknown + cTimeoutms, _ := (C.uint)(Timeoutms), cgoAllocsUnknown + __ret := C.nvmlEventSetWait_v2(cSet, cData, cTimeoutms) + __v := (Return)(__ret) + return __v +} + +// nvmlEventSetFree function as declared in nvml/nvml.h +func nvmlEventSetFree(Set EventSet) Return { + cSet, _ := *(*C.nvmlEventSet_t)(unsafe.Pointer(&Set)), cgoAllocsUnknown + __ret := C.nvmlEventSetFree(cSet) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceModifyDrainState function as declared in nvml/nvml.h +func nvmlDeviceModifyDrainState(PciInfo *PciInfo, NewState EnableState) Return { + cPciInfo, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(PciInfo)), cgoAllocsUnknown + cNewState, _ := (C.nvmlEnableState_t)(NewState), cgoAllocsUnknown + __ret := C.nvmlDeviceModifyDrainState(cPciInfo, cNewState) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceQueryDrainState function as declared in nvml/nvml.h +func nvmlDeviceQueryDrainState(PciInfo *PciInfo, CurrentState *EnableState) Return { + cPciInfo, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(PciInfo)), cgoAllocsUnknown + cCurrentState, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(CurrentState)), cgoAllocsUnknown + __ret := C.nvmlDeviceQueryDrainState(cPciInfo, cCurrentState) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceRemoveGpu_v2 function as declared in nvml/nvml.h +func nvmlDeviceRemoveGpu_v2(PciInfo *PciInfo, GpuState DetachGpuState, LinkState PcieLinkState) Return { + cPciInfo, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(PciInfo)), cgoAllocsUnknown + cGpuState, _ := (C.nvmlDetachGpuState_t)(GpuState), cgoAllocsUnknown + cLinkState, _ := (C.nvmlPcieLinkState_t)(LinkState), cgoAllocsUnknown + __ret := C.nvmlDeviceRemoveGpu_v2(cPciInfo, cGpuState, cLinkState) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceDiscoverGpus function as declared in nvml/nvml.h +func nvmlDeviceDiscoverGpus(PciInfo *PciInfo) Return { + cPciInfo, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(PciInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceDiscoverGpus(cPciInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetFieldValues function as declared in nvml/nvml.h +func nvmlDeviceGetFieldValues(Device Device, ValuesCount int32, Values *FieldValue) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cValuesCount, _ := (C.int)(ValuesCount), cgoAllocsUnknown + cValues, _ := (*C.nvmlFieldValue_t)(unsafe.Pointer(Values)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetFieldValues(cDevice, cValuesCount, cValues) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceClearFieldValues function as declared in nvml/nvml.h +func nvmlDeviceClearFieldValues(Device Device, ValuesCount int32, Values *FieldValue) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cValuesCount, _ := (C.int)(ValuesCount), cgoAllocsUnknown + cValues, _ := (*C.nvmlFieldValue_t)(unsafe.Pointer(Values)), cgoAllocsUnknown + __ret := C.nvmlDeviceClearFieldValues(cDevice, cValuesCount, cValues) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVirtualizationMode function as declared in 
nvml/nvml.h +func nvmlDeviceGetVirtualizationMode(Device Device, PVirtualMode *GpuVirtualizationMode) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPVirtualMode, _ := (*C.nvmlGpuVirtualizationMode_t)(unsafe.Pointer(PVirtualMode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVirtualizationMode(cDevice, cPVirtualMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetHostVgpuMode function as declared in nvml/nvml.h +func nvmlDeviceGetHostVgpuMode(Device Device, PHostVgpuMode *HostVgpuMode) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPHostVgpuMode, _ := (*C.nvmlHostVgpuMode_t)(unsafe.Pointer(PHostVgpuMode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetHostVgpuMode(cDevice, cPHostVgpuMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetVirtualizationMode function as declared in nvml/nvml.h +func nvmlDeviceSetVirtualizationMode(Device Device, VirtualMode GpuVirtualizationMode) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cVirtualMode, _ := (C.nvmlGpuVirtualizationMode_t)(VirtualMode), cgoAllocsUnknown + __ret := C.nvmlDeviceSetVirtualizationMode(cDevice, cVirtualMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGridLicensableFeatures_v4 function as declared in nvml/nvml.h +func nvmlDeviceGetGridLicensableFeatures_v4(Device Device, PGridLicensableFeatures *GridLicensableFeatures) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPGridLicensableFeatures, _ := (*C.nvmlGridLicensableFeatures_t)(unsafe.Pointer(PGridLicensableFeatures)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGridLicensableFeatures_v4(cDevice, cPGridLicensableFeatures) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetProcessUtilization function as declared in nvml/nvml.h +func nvmlDeviceGetProcessUtilization(Device Device, Utilization *ProcessUtilizationSample, ProcessSamplesCount *uint32, LastSeenTimeStamp uint64) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cUtilization, _ := (*C.nvmlProcessUtilizationSample_t)(unsafe.Pointer(Utilization)), cgoAllocsUnknown + cProcessSamplesCount, _ := (*C.uint)(unsafe.Pointer(ProcessSamplesCount)), cgoAllocsUnknown + cLastSeenTimeStamp, _ := (C.ulonglong)(LastSeenTimeStamp), cgoAllocsUnknown + __ret := C.nvmlDeviceGetProcessUtilization(cDevice, cUtilization, cProcessSamplesCount, cLastSeenTimeStamp) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGspFirmwareVersion function as declared in nvml/nvml.h +func nvmlDeviceGetGspFirmwareVersion(Device Device, Version *byte) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cVersion, _ := (*C.char)(unsafe.Pointer(Version)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGspFirmwareVersion(cDevice, cVersion) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGspFirmwareMode function as declared in nvml/nvml.h +func nvmlDeviceGetGspFirmwareMode(Device Device, IsEnabled *uint32, DefaultMode *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cIsEnabled, _ := (*C.uint)(unsafe.Pointer(IsEnabled)), cgoAllocsUnknown + cDefaultMode, _ := (*C.uint)(unsafe.Pointer(DefaultMode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGspFirmwareMode(cDevice, cIsEnabled, cDefaultMode) + __v := (Return)(__ret) + return __v +} + +// nvmlGetVgpuDriverCapabilities function as declared in nvml/nvml.h +func 
nvmlGetVgpuDriverCapabilities(Capability VgpuDriverCapability, CapResult *uint32) Return { + cCapability, _ := (C.nvmlVgpuDriverCapability_t)(Capability), cgoAllocsUnknown + cCapResult, _ := (*C.uint)(unsafe.Pointer(CapResult)), cgoAllocsUnknown + __ret := C.nvmlGetVgpuDriverCapabilities(cCapability, cCapResult) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVgpuCapabilities function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuCapabilities(Device Device, Capability DeviceVgpuCapability, CapResult *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCapability, _ := (C.nvmlDeviceVgpuCapability_t)(Capability), cgoAllocsUnknown + cCapResult, _ := (*C.uint)(unsafe.Pointer(CapResult)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuCapabilities(cDevice, cCapability, cCapResult) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSupportedVgpus function as declared in nvml/nvml.h +func nvmlDeviceGetSupportedVgpus(Device Device, VgpuCount *uint32, VgpuTypeIds *VgpuTypeId) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cVgpuCount, _ := (*C.uint)(unsafe.Pointer(VgpuCount)), cgoAllocsUnknown + cVgpuTypeIds, _ := (*C.nvmlVgpuTypeId_t)(unsafe.Pointer(VgpuTypeIds)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSupportedVgpus(cDevice, cVgpuCount, cVgpuTypeIds) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCreatableVgpus function as declared in nvml/nvml.h +func nvmlDeviceGetCreatableVgpus(Device Device, VgpuCount *uint32, VgpuTypeIds *VgpuTypeId) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cVgpuCount, _ := (*C.uint)(unsafe.Pointer(VgpuCount)), cgoAllocsUnknown + cVgpuTypeIds, _ := (*C.nvmlVgpuTypeId_t)(unsafe.Pointer(VgpuTypeIds)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCreatableVgpus(cDevice, cVgpuCount, cVgpuTypeIds) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetClass function as declared in nvml/nvml.h +func nvmlVgpuTypeGetClass(VgpuTypeId VgpuTypeId, VgpuTypeClass *byte, Size *uint32) Return { + cVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(VgpuTypeId), cgoAllocsUnknown + cVgpuTypeClass, _ := (*C.char)(unsafe.Pointer(VgpuTypeClass)), cgoAllocsUnknown + cSize, _ := (*C.uint)(unsafe.Pointer(Size)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetClass(cVgpuTypeId, cVgpuTypeClass, cSize) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetName function as declared in nvml/nvml.h +func nvmlVgpuTypeGetName(VgpuTypeId VgpuTypeId, VgpuTypeName *byte, Size *uint32) Return { + cVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(VgpuTypeId), cgoAllocsUnknown + cVgpuTypeName, _ := (*C.char)(unsafe.Pointer(VgpuTypeName)), cgoAllocsUnknown + cSize, _ := (*C.uint)(unsafe.Pointer(Size)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetName(cVgpuTypeId, cVgpuTypeName, cSize) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetGpuInstanceProfileId function as declared in nvml/nvml.h +func nvmlVgpuTypeGetGpuInstanceProfileId(VgpuTypeId VgpuTypeId, GpuInstanceProfileId *uint32) Return { + cVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(VgpuTypeId), cgoAllocsUnknown + cGpuInstanceProfileId, _ := (*C.uint)(unsafe.Pointer(GpuInstanceProfileId)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetGpuInstanceProfileId(cVgpuTypeId, cGpuInstanceProfileId) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetDeviceID function as declared in nvml/nvml.h +func nvmlVgpuTypeGetDeviceID(VgpuTypeId VgpuTypeId, DeviceID *uint64, SubsystemID *uint64) Return 
{ + cVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(VgpuTypeId), cgoAllocsUnknown + cDeviceID, _ := (*C.ulonglong)(unsafe.Pointer(DeviceID)), cgoAllocsUnknown + cSubsystemID, _ := (*C.ulonglong)(unsafe.Pointer(SubsystemID)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetDeviceID(cVgpuTypeId, cDeviceID, cSubsystemID) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetFramebufferSize function as declared in nvml/nvml.h +func nvmlVgpuTypeGetFramebufferSize(VgpuTypeId VgpuTypeId, FbSize *uint64) Return { + cVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(VgpuTypeId), cgoAllocsUnknown + cFbSize, _ := (*C.ulonglong)(unsafe.Pointer(FbSize)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetFramebufferSize(cVgpuTypeId, cFbSize) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetNumDisplayHeads function as declared in nvml/nvml.h +func nvmlVgpuTypeGetNumDisplayHeads(VgpuTypeId VgpuTypeId, NumDisplayHeads *uint32) Return { + cVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(VgpuTypeId), cgoAllocsUnknown + cNumDisplayHeads, _ := (*C.uint)(unsafe.Pointer(NumDisplayHeads)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetNumDisplayHeads(cVgpuTypeId, cNumDisplayHeads) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetResolution function as declared in nvml/nvml.h +func nvmlVgpuTypeGetResolution(VgpuTypeId VgpuTypeId, DisplayIndex uint32, Xdim *uint32, Ydim *uint32) Return { + cVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(VgpuTypeId), cgoAllocsUnknown + cDisplayIndex, _ := (C.uint)(DisplayIndex), cgoAllocsUnknown + cXdim, _ := (*C.uint)(unsafe.Pointer(Xdim)), cgoAllocsUnknown + cYdim, _ := (*C.uint)(unsafe.Pointer(Ydim)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetResolution(cVgpuTypeId, cDisplayIndex, cXdim, cYdim) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetLicense function as declared in nvml/nvml.h +func nvmlVgpuTypeGetLicense(VgpuTypeId VgpuTypeId, VgpuTypeLicenseString *byte, Size uint32) Return { + cVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(VgpuTypeId), cgoAllocsUnknown + cVgpuTypeLicenseString, _ := (*C.char)(unsafe.Pointer(VgpuTypeLicenseString)), cgoAllocsUnknown + cSize, _ := (C.uint)(Size), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetLicense(cVgpuTypeId, cVgpuTypeLicenseString, cSize) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetFrameRateLimit function as declared in nvml/nvml.h +func nvmlVgpuTypeGetFrameRateLimit(VgpuTypeId VgpuTypeId, FrameRateLimit *uint32) Return { + cVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(VgpuTypeId), cgoAllocsUnknown + cFrameRateLimit, _ := (*C.uint)(unsafe.Pointer(FrameRateLimit)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetFrameRateLimit(cVgpuTypeId, cFrameRateLimit) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetMaxInstances function as declared in nvml/nvml.h +func nvmlVgpuTypeGetMaxInstances(Device Device, VgpuTypeId VgpuTypeId, VgpuInstanceCount *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(VgpuTypeId), cgoAllocsUnknown + cVgpuInstanceCount, _ := (*C.uint)(unsafe.Pointer(VgpuInstanceCount)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetMaxInstances(cDevice, cVgpuTypeId, cVgpuInstanceCount) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetMaxInstancesPerVm function as declared in nvml/nvml.h +func nvmlVgpuTypeGetMaxInstancesPerVm(VgpuTypeId VgpuTypeId, VgpuInstanceCountPerVm *uint32) Return { + cVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(VgpuTypeId), cgoAllocsUnknown + cVgpuInstanceCountPerVm, _ := 
(*C.uint)(unsafe.Pointer(VgpuInstanceCountPerVm)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetMaxInstancesPerVm(cVgpuTypeId, cVgpuInstanceCountPerVm) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetActiveVgpus function as declared in nvml/nvml.h +func nvmlDeviceGetActiveVgpus(Device Device, VgpuCount *uint32, VgpuInstances *VgpuInstance) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cVgpuCount, _ := (*C.uint)(unsafe.Pointer(VgpuCount)), cgoAllocsUnknown + cVgpuInstances, _ := (*C.nvmlVgpuInstance_t)(unsafe.Pointer(VgpuInstances)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetActiveVgpus(cDevice, cVgpuCount, cVgpuInstances) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetVmID function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetVmID(VgpuInstance VgpuInstance, VmId *byte, Size uint32, VmIdType *VgpuVmIdType) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cVmId, _ := (*C.char)(unsafe.Pointer(VmId)), cgoAllocsUnknown + cSize, _ := (C.uint)(Size), cgoAllocsUnknown + cVmIdType, _ := (*C.nvmlVgpuVmIdType_t)(unsafe.Pointer(VmIdType)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetVmID(cVgpuInstance, cVmId, cSize, cVmIdType) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetUUID function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetUUID(VgpuInstance VgpuInstance, Uuid *byte, Size uint32) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cUuid, _ := (*C.char)(unsafe.Pointer(Uuid)), cgoAllocsUnknown + cSize, _ := (C.uint)(Size), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetUUID(cVgpuInstance, cUuid, cSize) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetVmDriverVersion function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetVmDriverVersion(VgpuInstance VgpuInstance, Version *byte, Length uint32) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cVersion, _ := (*C.char)(unsafe.Pointer(Version)), cgoAllocsUnknown + cLength, _ := (C.uint)(Length), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetVmDriverVersion(cVgpuInstance, cVersion, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetFbUsage function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetFbUsage(VgpuInstance VgpuInstance, FbUsage *uint64) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cFbUsage, _ := (*C.ulonglong)(unsafe.Pointer(FbUsage)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetFbUsage(cVgpuInstance, cFbUsage) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetLicenseStatus function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetLicenseStatus(VgpuInstance VgpuInstance, Licensed *uint32) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cLicensed, _ := (*C.uint)(unsafe.Pointer(Licensed)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetLicenseStatus(cVgpuInstance, cLicensed) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetType function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetType(VgpuInstance VgpuInstance, VgpuTypeId *VgpuTypeId) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cVgpuTypeId, _ := (*C.nvmlVgpuTypeId_t)(unsafe.Pointer(VgpuTypeId)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetType(cVgpuInstance, cVgpuTypeId) + __v := (Return)(__ret) + return __v +} + +// 
nvmlVgpuInstanceGetFrameRateLimit function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetFrameRateLimit(VgpuInstance VgpuInstance, FrameRateLimit *uint32) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cFrameRateLimit, _ := (*C.uint)(unsafe.Pointer(FrameRateLimit)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetFrameRateLimit(cVgpuInstance, cFrameRateLimit) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetEccMode function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetEccMode(VgpuInstance VgpuInstance, EccMode *EnableState) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cEccMode, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(EccMode)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetEccMode(cVgpuInstance, cEccMode) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetEncoderCapacity function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetEncoderCapacity(VgpuInstance VgpuInstance, EncoderCapacity *uint32) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cEncoderCapacity, _ := (*C.uint)(unsafe.Pointer(EncoderCapacity)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetEncoderCapacity(cVgpuInstance, cEncoderCapacity) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceSetEncoderCapacity function as declared in nvml/nvml.h +func nvmlVgpuInstanceSetEncoderCapacity(VgpuInstance VgpuInstance, EncoderCapacity uint32) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cEncoderCapacity, _ := (C.uint)(EncoderCapacity), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceSetEncoderCapacity(cVgpuInstance, cEncoderCapacity) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetEncoderStats function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetEncoderStats(VgpuInstance VgpuInstance, SessionCount *uint32, AverageFps *uint32, AverageLatency *uint32) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cSessionCount, _ := (*C.uint)(unsafe.Pointer(SessionCount)), cgoAllocsUnknown + cAverageFps, _ := (*C.uint)(unsafe.Pointer(AverageFps)), cgoAllocsUnknown + cAverageLatency, _ := (*C.uint)(unsafe.Pointer(AverageLatency)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetEncoderStats(cVgpuInstance, cSessionCount, cAverageFps, cAverageLatency) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetEncoderSessions function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetEncoderSessions(VgpuInstance VgpuInstance, SessionCount *uint32, SessionInfo *EncoderSessionInfo) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cSessionCount, _ := (*C.uint)(unsafe.Pointer(SessionCount)), cgoAllocsUnknown + cSessionInfo, _ := (*C.nvmlEncoderSessionInfo_t)(unsafe.Pointer(SessionInfo)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetEncoderSessions(cVgpuInstance, cSessionCount, cSessionInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetFBCStats function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetFBCStats(VgpuInstance VgpuInstance, FbcStats *FBCStats) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cFbcStats, _ := (*C.nvmlFBCStats_t)(unsafe.Pointer(FbcStats)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetFBCStats(cVgpuInstance, cFbcStats) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetFBCSessions function as declared in 
nvml/nvml.h +func nvmlVgpuInstanceGetFBCSessions(VgpuInstance VgpuInstance, SessionCount *uint32, SessionInfo *FBCSessionInfo) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cSessionCount, _ := (*C.uint)(unsafe.Pointer(SessionCount)), cgoAllocsUnknown + cSessionInfo, _ := (*C.nvmlFBCSessionInfo_t)(unsafe.Pointer(SessionInfo)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetFBCSessions(cVgpuInstance, cSessionCount, cSessionInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetGpuInstanceId function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetGpuInstanceId(VgpuInstance VgpuInstance, GpuInstanceId *uint32) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cGpuInstanceId, _ := (*C.uint)(unsafe.Pointer(GpuInstanceId)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetGpuInstanceId(cVgpuInstance, cGpuInstanceId) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetGpuPciId function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetGpuPciId(VgpuInstance VgpuInstance, VgpuPciId *byte, Length *uint32) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cVgpuPciId, _ := (*C.char)(unsafe.Pointer(VgpuPciId)), cgoAllocsUnknown + cLength, _ := (*C.uint)(unsafe.Pointer(Length)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetGpuPciId(cVgpuInstance, cVgpuPciId, cLength) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuTypeGetCapabilities function as declared in nvml/nvml.h +func nvmlVgpuTypeGetCapabilities(VgpuTypeId VgpuTypeId, Capability VgpuCapability, CapResult *uint32) Return { + cVgpuTypeId, _ := (C.nvmlVgpuTypeId_t)(VgpuTypeId), cgoAllocsUnknown + cCapability, _ := (C.nvmlVgpuCapability_t)(Capability), cgoAllocsUnknown + cCapResult, _ := (*C.uint)(unsafe.Pointer(CapResult)), cgoAllocsUnknown + __ret := C.nvmlVgpuTypeGetCapabilities(cVgpuTypeId, cCapability, cCapResult) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetMetadata function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetMetadata(VgpuInstance VgpuInstance, nvmlVgpuMetadata *nvmlVgpuMetadata, BufferSize *uint32) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cnvmlVgpuMetadata, _ := (*C.nvmlVgpuMetadata_t)(unsafe.Pointer(nvmlVgpuMetadata)), cgoAllocsUnknown + cBufferSize, _ := (*C.uint)(unsafe.Pointer(BufferSize)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetMetadata(cVgpuInstance, cnvmlVgpuMetadata, cBufferSize) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVgpuMetadata function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuMetadata(Device Device, PgpuMetadata *nvmlVgpuPgpuMetadata, BufferSize *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPgpuMetadata, _ := (*C.nvmlVgpuPgpuMetadata_t)(unsafe.Pointer(PgpuMetadata)), cgoAllocsUnknown + cBufferSize, _ := (*C.uint)(unsafe.Pointer(BufferSize)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuMetadata(cDevice, cPgpuMetadata, cBufferSize) + __v := (Return)(__ret) + return __v +} + +// nvmlGetVgpuCompatibility function as declared in nvml/nvml.h +func nvmlGetVgpuCompatibility(nvmlVgpuMetadata *nvmlVgpuMetadata, PgpuMetadata *nvmlVgpuPgpuMetadata, CompatibilityInfo *VgpuPgpuCompatibility) Return { + cnvmlVgpuMetadata, _ := (*C.nvmlVgpuMetadata_t)(unsafe.Pointer(nvmlVgpuMetadata)), cgoAllocsUnknown + cPgpuMetadata, _ := (*C.nvmlVgpuPgpuMetadata_t)(unsafe.Pointer(PgpuMetadata)), cgoAllocsUnknown + 
cCompatibilityInfo, _ := (*C.nvmlVgpuPgpuCompatibility_t)(unsafe.Pointer(CompatibilityInfo)), cgoAllocsUnknown + __ret := C.nvmlGetVgpuCompatibility(cnvmlVgpuMetadata, cPgpuMetadata, cCompatibilityInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPgpuMetadataString function as declared in nvml/nvml.h +func nvmlDeviceGetPgpuMetadataString(Device Device, PgpuMetadata *byte, BufferSize *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPgpuMetadata, _ := (*C.char)(unsafe.Pointer(PgpuMetadata)), cgoAllocsUnknown + cBufferSize, _ := (*C.uint)(unsafe.Pointer(BufferSize)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPgpuMetadataString(cDevice, cPgpuMetadata, cBufferSize) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVgpuSchedulerLog function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuSchedulerLog(Device Device, PSchedulerLog *VgpuSchedulerLog) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPSchedulerLog, _ := (*C.nvmlVgpuSchedulerLog_t)(unsafe.Pointer(PSchedulerLog)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuSchedulerLog(cDevice, cPSchedulerLog) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVgpuSchedulerState function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuSchedulerState(Device Device, PSchedulerState *VgpuSchedulerGetState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPSchedulerState, _ := (*C.nvmlVgpuSchedulerGetState_t)(unsafe.Pointer(PSchedulerState)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuSchedulerState(cDevice, cPSchedulerState) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetVgpuSchedulerState function as declared in nvml/nvml.h +func nvmlDeviceSetVgpuSchedulerState(Device Device, PSchedulerState *VgpuSchedulerSetState) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPSchedulerState, _ := (*C.nvmlVgpuSchedulerSetState_t)(unsafe.Pointer(PSchedulerState)), cgoAllocsUnknown + __ret := C.nvmlDeviceSetVgpuSchedulerState(cDevice, cPSchedulerState) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVgpuSchedulerCapabilities function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuSchedulerCapabilities(Device Device, PCapabilities *VgpuSchedulerCapabilities) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPCapabilities, _ := (*C.nvmlVgpuSchedulerCapabilities_t)(unsafe.Pointer(PCapabilities)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuSchedulerCapabilities(cDevice, cPCapabilities) + __v := (Return)(__ret) + return __v +} + +// nvmlGetVgpuVersion function as declared in nvml/nvml.h +func nvmlGetVgpuVersion(Supported *VgpuVersion, Current *VgpuVersion) Return { + cSupported, _ := (*C.nvmlVgpuVersion_t)(unsafe.Pointer(Supported)), cgoAllocsUnknown + cCurrent, _ := (*C.nvmlVgpuVersion_t)(unsafe.Pointer(Current)), cgoAllocsUnknown + __ret := C.nvmlGetVgpuVersion(cSupported, cCurrent) + __v := (Return)(__ret) + return __v +} + +// nvmlSetVgpuVersion function as declared in nvml/nvml.h +func nvmlSetVgpuVersion(VgpuVersion *VgpuVersion) Return { + cVgpuVersion, _ := (*C.nvmlVgpuVersion_t)(unsafe.Pointer(VgpuVersion)), cgoAllocsUnknown + __ret := C.nvmlSetVgpuVersion(cVgpuVersion) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVgpuUtilization function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuUtilization(Device Device, LastSeenTimeStamp uint64, SampleValType 
*ValueType, VgpuInstanceSamplesCount *uint32, UtilizationSamples *VgpuInstanceUtilizationSample) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLastSeenTimeStamp, _ := (C.ulonglong)(LastSeenTimeStamp), cgoAllocsUnknown + cSampleValType, _ := (*C.nvmlValueType_t)(unsafe.Pointer(SampleValType)), cgoAllocsUnknown + cVgpuInstanceSamplesCount, _ := (*C.uint)(unsafe.Pointer(VgpuInstanceSamplesCount)), cgoAllocsUnknown + cUtilizationSamples, _ := (*C.nvmlVgpuInstanceUtilizationSample_t)(unsafe.Pointer(UtilizationSamples)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuUtilization(cDevice, cLastSeenTimeStamp, cSampleValType, cVgpuInstanceSamplesCount, cUtilizationSamples) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetVgpuProcessUtilization function as declared in nvml/nvml.h +func nvmlDeviceGetVgpuProcessUtilization(Device Device, LastSeenTimeStamp uint64, VgpuProcessSamplesCount *uint32, UtilizationSamples *VgpuProcessUtilizationSample) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLastSeenTimeStamp, _ := (C.ulonglong)(LastSeenTimeStamp), cgoAllocsUnknown + cVgpuProcessSamplesCount, _ := (*C.uint)(unsafe.Pointer(VgpuProcessSamplesCount)), cgoAllocsUnknown + cUtilizationSamples, _ := (*C.nvmlVgpuProcessUtilizationSample_t)(unsafe.Pointer(UtilizationSamples)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetVgpuProcessUtilization(cDevice, cLastSeenTimeStamp, cVgpuProcessSamplesCount, cUtilizationSamples) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetAccountingMode function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetAccountingMode(VgpuInstance VgpuInstance, Mode *EnableState) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cMode, _ := (*C.nvmlEnableState_t)(unsafe.Pointer(Mode)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetAccountingMode(cVgpuInstance, cMode) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetAccountingPids function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetAccountingPids(VgpuInstance VgpuInstance, Count *uint32, Pids *uint32) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + cPids, _ := (*C.uint)(unsafe.Pointer(Pids)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetAccountingPids(cVgpuInstance, cCount, cPids) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetAccountingStats function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetAccountingStats(VgpuInstance VgpuInstance, Pid uint32, Stats *AccountingStats) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cPid, _ := (C.uint)(Pid), cgoAllocsUnknown + cStats, _ := (*C.nvmlAccountingStats_t)(unsafe.Pointer(Stats)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetAccountingStats(cVgpuInstance, cPid, cStats) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceClearAccountingPids function as declared in nvml/nvml.h +func nvmlVgpuInstanceClearAccountingPids(VgpuInstance VgpuInstance) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceClearAccountingPids(cVgpuInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetLicenseInfo_v2 function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetLicenseInfo_v2(VgpuInstance VgpuInstance, LicenseInfo *VgpuLicenseInfo) Return { + cVgpuInstance, _ := 
(C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cLicenseInfo, _ := (*C.nvmlVgpuLicenseInfo_t)(unsafe.Pointer(LicenseInfo)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetLicenseInfo_v2(cVgpuInstance, cLicenseInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlGetExcludedDeviceCount function as declared in nvml/nvml.h +func nvmlGetExcludedDeviceCount(DeviceCount *uint32) Return { + cDeviceCount, _ := (*C.uint)(unsafe.Pointer(DeviceCount)), cgoAllocsUnknown + __ret := C.nvmlGetExcludedDeviceCount(cDeviceCount) + __v := (Return)(__ret) + return __v +} + +// nvmlGetExcludedDeviceInfoByIndex function as declared in nvml/nvml.h +func nvmlGetExcludedDeviceInfoByIndex(Index uint32, Info *ExcludedDeviceInfo) Return { + cIndex, _ := (C.uint)(Index), cgoAllocsUnknown + cInfo, _ := (*C.nvmlExcludedDeviceInfo_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlGetExcludedDeviceInfoByIndex(cIndex, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetMigMode function as declared in nvml/nvml.h +func nvmlDeviceSetMigMode(Device Device, Mode uint32, ActivationStatus *Return) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMode, _ := (C.uint)(Mode), cgoAllocsUnknown + cActivationStatus, _ := (*C.nvmlReturn_t)(unsafe.Pointer(ActivationStatus)), cgoAllocsUnknown + __ret := C.nvmlDeviceSetMigMode(cDevice, cMode, cActivationStatus) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMigMode function as declared in nvml/nvml.h +func nvmlDeviceGetMigMode(Device Device, CurrentMode *uint32, PendingMode *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCurrentMode, _ := (*C.uint)(unsafe.Pointer(CurrentMode)), cgoAllocsUnknown + cPendingMode, _ := (*C.uint)(unsafe.Pointer(PendingMode)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMigMode(cDevice, cCurrentMode, cPendingMode) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuInstanceProfileInfo function as declared in nvml/nvml.h +func nvmlDeviceGetGpuInstanceProfileInfo(Device Device, Profile uint32, Info *GpuInstanceProfileInfo) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cProfile, _ := (C.uint)(Profile), cgoAllocsUnknown + cInfo, _ := (*C.nvmlGpuInstanceProfileInfo_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuInstanceProfileInfo(cDevice, cProfile, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuInstanceProfileInfoV function as declared in nvml/nvml.h +func nvmlDeviceGetGpuInstanceProfileInfoV(Device Device, Profile uint32, Info *GpuInstanceProfileInfo_v2) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cProfile, _ := (C.uint)(Profile), cgoAllocsUnknown + cInfo, _ := (*C.nvmlGpuInstanceProfileInfo_v2_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuInstanceProfileInfoV(cDevice, cProfile, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuInstancePossiblePlacements_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetGpuInstancePossiblePlacements_v2(Device Device, ProfileId uint32, Placements *GpuInstancePlacement, Count *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cPlacements, _ := (*C.nvmlGpuInstancePlacement_t)(unsafe.Pointer(Placements)), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret 
:= C.nvmlDeviceGetGpuInstancePossiblePlacements_v2(cDevice, cProfileId, cPlacements, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuInstanceRemainingCapacity function as declared in nvml/nvml.h +func nvmlDeviceGetGpuInstanceRemainingCapacity(Device Device, ProfileId uint32, Count *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuInstanceRemainingCapacity(cDevice, cProfileId, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceCreateGpuInstance function as declared in nvml/nvml.h +func nvmlDeviceCreateGpuInstance(Device Device, ProfileId uint32, GpuInstance *GpuInstance) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cGpuInstance, _ := (*C.nvmlGpuInstance_t)(unsafe.Pointer(GpuInstance)), cgoAllocsUnknown + __ret := C.nvmlDeviceCreateGpuInstance(cDevice, cProfileId, cGpuInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceCreateGpuInstanceWithPlacement function as declared in nvml/nvml.h +func nvmlDeviceCreateGpuInstanceWithPlacement(Device Device, ProfileId uint32, Placement *GpuInstancePlacement, GpuInstance *GpuInstance) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cPlacement, _ := (*C.nvmlGpuInstancePlacement_t)(unsafe.Pointer(Placement)), cgoAllocsUnknown + cGpuInstance, _ := (*C.nvmlGpuInstance_t)(unsafe.Pointer(GpuInstance)), cgoAllocsUnknown + __ret := C.nvmlDeviceCreateGpuInstanceWithPlacement(cDevice, cProfileId, cPlacement, cGpuInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceDestroy function as declared in nvml/nvml.h +func nvmlGpuInstanceDestroy(GpuInstance GpuInstance) Return { + cGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&GpuInstance)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceDestroy(cGpuInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuInstances function as declared in nvml/nvml.h +func nvmlDeviceGetGpuInstances(Device Device, ProfileId uint32, GpuInstances *GpuInstance, Count *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cGpuInstances, _ := (*C.nvmlGpuInstance_t)(unsafe.Pointer(GpuInstances)), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuInstances(cDevice, cProfileId, cGpuInstances, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuInstanceById function as declared in nvml/nvml.h +func nvmlDeviceGetGpuInstanceById(Device Device, Id uint32, GpuInstance *GpuInstance) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cId, _ := (C.uint)(Id), cgoAllocsUnknown + cGpuInstance, _ := (*C.nvmlGpuInstance_t)(unsafe.Pointer(GpuInstance)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuInstanceById(cDevice, cId, cGpuInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetInfo function as declared in nvml/nvml.h +func nvmlGpuInstanceGetInfo(GpuInstance GpuInstance, Info *GpuInstanceInfo) Return { + cGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&GpuInstance)), cgoAllocsUnknown + cInfo, _ := 
(*C.nvmlGpuInstanceInfo_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetInfo(cGpuInstance, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetComputeInstanceProfileInfo function as declared in nvml/nvml.h +func nvmlGpuInstanceGetComputeInstanceProfileInfo(GpuInstance GpuInstance, Profile uint32, EngProfile uint32, Info *ComputeInstanceProfileInfo) Return { + cGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&GpuInstance)), cgoAllocsUnknown + cProfile, _ := (C.uint)(Profile), cgoAllocsUnknown + cEngProfile, _ := (C.uint)(EngProfile), cgoAllocsUnknown + cInfo, _ := (*C.nvmlComputeInstanceProfileInfo_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetComputeInstanceProfileInfo(cGpuInstance, cProfile, cEngProfile, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetComputeInstanceProfileInfoV function as declared in nvml/nvml.h +func nvmlGpuInstanceGetComputeInstanceProfileInfoV(GpuInstance GpuInstance, Profile uint32, EngProfile uint32, Info *ComputeInstanceProfileInfo_v2) Return { + cGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&GpuInstance)), cgoAllocsUnknown + cProfile, _ := (C.uint)(Profile), cgoAllocsUnknown + cEngProfile, _ := (C.uint)(EngProfile), cgoAllocsUnknown + cInfo, _ := (*C.nvmlComputeInstanceProfileInfo_v2_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetComputeInstanceProfileInfoV(cGpuInstance, cProfile, cEngProfile, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetComputeInstanceRemainingCapacity function as declared in nvml/nvml.h +func nvmlGpuInstanceGetComputeInstanceRemainingCapacity(GpuInstance GpuInstance, ProfileId uint32, Count *uint32) Return { + cGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&GpuInstance)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetComputeInstanceRemainingCapacity(cGpuInstance, cProfileId, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetComputeInstancePossiblePlacements function as declared in nvml/nvml.h +func nvmlGpuInstanceGetComputeInstancePossiblePlacements(GpuInstance GpuInstance, ProfileId uint32, Placements *ComputeInstancePlacement, Count *uint32) Return { + cGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&GpuInstance)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cPlacements, _ := (*C.nvmlComputeInstancePlacement_t)(unsafe.Pointer(Placements)), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetComputeInstancePossiblePlacements(cGpuInstance, cProfileId, cPlacements, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceCreateComputeInstance function as declared in nvml/nvml.h +func nvmlGpuInstanceCreateComputeInstance(GpuInstance GpuInstance, ProfileId uint32, ComputeInstance *ComputeInstance) Return { + cGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&GpuInstance)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cComputeInstance, _ := (*C.nvmlComputeInstance_t)(unsafe.Pointer(ComputeInstance)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceCreateComputeInstance(cGpuInstance, cProfileId, cComputeInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceCreateComputeInstanceWithPlacement function as declared in nvml/nvml.h +func 
nvmlGpuInstanceCreateComputeInstanceWithPlacement(GpuInstance GpuInstance, ProfileId uint32, Placement *ComputeInstancePlacement, ComputeInstance *ComputeInstance) Return { + cGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&GpuInstance)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cPlacement, _ := (*C.nvmlComputeInstancePlacement_t)(unsafe.Pointer(Placement)), cgoAllocsUnknown + cComputeInstance, _ := (*C.nvmlComputeInstance_t)(unsafe.Pointer(ComputeInstance)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceCreateComputeInstanceWithPlacement(cGpuInstance, cProfileId, cPlacement, cComputeInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlComputeInstanceDestroy function as declared in nvml/nvml.h +func nvmlComputeInstanceDestroy(ComputeInstance ComputeInstance) Return { + cComputeInstance, _ := *(*C.nvmlComputeInstance_t)(unsafe.Pointer(&ComputeInstance)), cgoAllocsUnknown + __ret := C.nvmlComputeInstanceDestroy(cComputeInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetComputeInstances function as declared in nvml/nvml.h +func nvmlGpuInstanceGetComputeInstances(GpuInstance GpuInstance, ProfileId uint32, ComputeInstances *ComputeInstance, Count *uint32) Return { + cGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&GpuInstance)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cComputeInstances, _ := (*C.nvmlComputeInstance_t)(unsafe.Pointer(ComputeInstances)), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetComputeInstances(cGpuInstance, cProfileId, cComputeInstances, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlGpuInstanceGetComputeInstanceById function as declared in nvml/nvml.h +func nvmlGpuInstanceGetComputeInstanceById(GpuInstance GpuInstance, Id uint32, ComputeInstance *ComputeInstance) Return { + cGpuInstance, _ := *(*C.nvmlGpuInstance_t)(unsafe.Pointer(&GpuInstance)), cgoAllocsUnknown + cId, _ := (C.uint)(Id), cgoAllocsUnknown + cComputeInstance, _ := (*C.nvmlComputeInstance_t)(unsafe.Pointer(ComputeInstance)), cgoAllocsUnknown + __ret := C.nvmlGpuInstanceGetComputeInstanceById(cGpuInstance, cId, cComputeInstance) + __v := (Return)(__ret) + return __v +} + +// nvmlComputeInstanceGetInfo_v2 function as declared in nvml/nvml.h +func nvmlComputeInstanceGetInfo_v2(ComputeInstance ComputeInstance, Info *ComputeInstanceInfo) Return { + cComputeInstance, _ := *(*C.nvmlComputeInstance_t)(unsafe.Pointer(&ComputeInstance)), cgoAllocsUnknown + cInfo, _ := (*C.nvmlComputeInstanceInfo_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlComputeInstanceGetInfo_v2(cComputeInstance, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceIsMigDeviceHandle function as declared in nvml/nvml.h +func nvmlDeviceIsMigDeviceHandle(Device Device, IsMigDevice *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cIsMigDevice, _ := (*C.uint)(unsafe.Pointer(IsMigDevice)), cgoAllocsUnknown + __ret := C.nvmlDeviceIsMigDeviceHandle(cDevice, cIsMigDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuInstanceId function as declared in nvml/nvml.h +func nvmlDeviceGetGpuInstanceId(Device Device, Id *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cId, _ := (*C.uint)(unsafe.Pointer(Id)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuInstanceId(cDevice, cId) + __v := (Return)(__ret) + return __v +} + +// 
nvmlDeviceGetComputeInstanceId function as declared in nvml/nvml.h +func nvmlDeviceGetComputeInstanceId(Device Device, Id *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cId, _ := (*C.uint)(unsafe.Pointer(Id)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetComputeInstanceId(cDevice, cId) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMaxMigDeviceCount function as declared in nvml/nvml.h +func nvmlDeviceGetMaxMigDeviceCount(Device Device, Count *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMaxMigDeviceCount(cDevice, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMigDeviceHandleByIndex function as declared in nvml/nvml.h +func nvmlDeviceGetMigDeviceHandleByIndex(Device Device, Index uint32, MigDevice *Device) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cIndex, _ := (C.uint)(Index), cgoAllocsUnknown + cMigDevice, _ := (*C.nvmlDevice_t)(unsafe.Pointer(MigDevice)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMigDeviceHandleByIndex(cDevice, cIndex, cMigDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetDeviceHandleFromMigDeviceHandle function as declared in nvml/nvml.h +func nvmlDeviceGetDeviceHandleFromMigDeviceHandle(MigDevice Device, Device *Device) Return { + cMigDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&MigDevice)), cgoAllocsUnknown + cDevice, _ := (*C.nvmlDevice_t)(unsafe.Pointer(Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDeviceHandleFromMigDeviceHandle(cMigDevice, cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetBusType function as declared in nvml/nvml.h +func nvmlDeviceGetBusType(Device Device, _type *BusType) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + c_type, _ := (*C.nvmlBusType_t)(unsafe.Pointer(_type)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetBusType(cDevice, c_type) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetDynamicPstatesInfo function as declared in nvml/nvml.h +func nvmlDeviceGetDynamicPstatesInfo(Device Device, PDynamicPstatesInfo *GpuDynamicPstatesInfo) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPDynamicPstatesInfo, _ := (*C.nvmlGpuDynamicPstatesInfo_t)(unsafe.Pointer(PDynamicPstatesInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetDynamicPstatesInfo(cDevice, cPDynamicPstatesInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetFanSpeed_v2 function as declared in nvml/nvml.h +func nvmlDeviceSetFanSpeed_v2(Device Device, Fan uint32, Speed uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cFan, _ := (C.uint)(Fan), cgoAllocsUnknown + cSpeed, _ := (C.uint)(Speed), cgoAllocsUnknown + __ret := C.nvmlDeviceSetFanSpeed_v2(cDevice, cFan, cSpeed) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpcClkVfOffset function as declared in nvml/nvml.h +func nvmlDeviceGetGpcClkVfOffset(Device Device, Offset *int32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cOffset, _ := (*C.int)(unsafe.Pointer(Offset)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpcClkVfOffset(cDevice, cOffset) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetGpcClkVfOffset function as declared in nvml/nvml.h +func nvmlDeviceSetGpcClkVfOffset(Device Device, Offset int32) Return 
{ + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cOffset, _ := (C.int)(Offset), cgoAllocsUnknown + __ret := C.nvmlDeviceSetGpcClkVfOffset(cDevice, cOffset) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMemClkVfOffset function as declared in nvml/nvml.h +func nvmlDeviceGetMemClkVfOffset(Device Device, Offset *int32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cOffset, _ := (*C.int)(unsafe.Pointer(Offset)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMemClkVfOffset(cDevice, cOffset) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetMemClkVfOffset function as declared in nvml/nvml.h +func nvmlDeviceSetMemClkVfOffset(Device Device, Offset int32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cOffset, _ := (C.int)(Offset), cgoAllocsUnknown + __ret := C.nvmlDeviceSetMemClkVfOffset(cDevice, cOffset) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMinMaxClockOfPState function as declared in nvml/nvml.h +func nvmlDeviceGetMinMaxClockOfPState(Device Device, _type ClockType, Pstate Pstates, MinClockMHz *uint32, MaxClockMHz *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + c_type, _ := (C.nvmlClockType_t)(_type), cgoAllocsUnknown + cPstate, _ := (C.nvmlPstates_t)(Pstate), cgoAllocsUnknown + cMinClockMHz, _ := (*C.uint)(unsafe.Pointer(MinClockMHz)), cgoAllocsUnknown + cMaxClockMHz, _ := (*C.uint)(unsafe.Pointer(MaxClockMHz)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMinMaxClockOfPState(cDevice, c_type, cPstate, cMinClockMHz, cMaxClockMHz) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetSupportedPerformanceStates function as declared in nvml/nvml.h +func nvmlDeviceGetSupportedPerformanceStates(Device Device, Pstates *Pstates, Size uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPstates, _ := (*C.nvmlPstates_t)(unsafe.Pointer(Pstates)), cgoAllocsUnknown + cSize, _ := (C.uint)(Size), cgoAllocsUnknown + __ret := C.nvmlDeviceGetSupportedPerformanceStates(cDevice, cPstates, cSize) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpcClkMinMaxVfOffset function as declared in nvml/nvml.h +func nvmlDeviceGetGpcClkMinMaxVfOffset(Device Device, MinOffset *int32, MaxOffset *int32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMinOffset, _ := (*C.int)(unsafe.Pointer(MinOffset)), cgoAllocsUnknown + cMaxOffset, _ := (*C.int)(unsafe.Pointer(MaxOffset)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpcClkMinMaxVfOffset(cDevice, cMinOffset, cMaxOffset) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMemClkMinMaxVfOffset function as declared in nvml/nvml.h +func nvmlDeviceGetMemClkMinMaxVfOffset(Device Device, MinOffset *int32, MaxOffset *int32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cMinOffset, _ := (*C.int)(unsafe.Pointer(MinOffset)), cgoAllocsUnknown + cMaxOffset, _ := (*C.int)(unsafe.Pointer(MaxOffset)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMemClkMinMaxVfOffset(cDevice, cMinOffset, cMaxOffset) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuFabricInfo function as declared in nvml/nvml.h +func nvmlDeviceGetGpuFabricInfo(Device Device, GpuFabricInfo *GpuFabricInfo) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cGpuFabricInfo, _ := 
(*C.nvmlGpuFabricInfo_t)(unsafe.Pointer(GpuFabricInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuFabricInfo(cDevice, cGpuFabricInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlGpmMetricsGet function as declared in nvml/nvml.h +func nvmlGpmMetricsGet(MetricsGet *GpmMetricsGetType) Return { + cMetricsGet, _ := (*C.nvmlGpmMetricsGet_t)(unsafe.Pointer(MetricsGet)), cgoAllocsUnknown + __ret := C.nvmlGpmMetricsGet(cMetricsGet) + __v := (Return)(__ret) + return __v +} + +// nvmlGpmSampleFree function as declared in nvml/nvml.h +func nvmlGpmSampleFree(GpmSample GpmSample) Return { + cGpmSample, _ := *(*C.nvmlGpmSample_t)(unsafe.Pointer(&GpmSample)), cgoAllocsUnknown + __ret := C.nvmlGpmSampleFree(cGpmSample) + __v := (Return)(__ret) + return __v +} + +// nvmlGpmSampleAlloc function as declared in nvml/nvml.h +func nvmlGpmSampleAlloc(GpmSample *GpmSample) Return { + cGpmSample, _ := (*C.nvmlGpmSample_t)(unsafe.Pointer(GpmSample)), cgoAllocsUnknown + __ret := C.nvmlGpmSampleAlloc(cGpmSample) + __v := (Return)(__ret) + return __v +} + +// nvmlGpmSampleGet function as declared in nvml/nvml.h +func nvmlGpmSampleGet(Device Device, GpmSample GpmSample) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cGpmSample, _ := *(*C.nvmlGpmSample_t)(unsafe.Pointer(&GpmSample)), cgoAllocsUnknown + __ret := C.nvmlGpmSampleGet(cDevice, cGpmSample) + __v := (Return)(__ret) + return __v +} + +// nvmlGpmMigSampleGet function as declared in nvml/nvml.h +func nvmlGpmMigSampleGet(Device Device, GpuInstanceId uint32, GpmSample GpmSample) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cGpuInstanceId, _ := (C.uint)(GpuInstanceId), cgoAllocsUnknown + cGpmSample, _ := *(*C.nvmlGpmSample_t)(unsafe.Pointer(&GpmSample)), cgoAllocsUnknown + __ret := C.nvmlGpmMigSampleGet(cDevice, cGpuInstanceId, cGpmSample) + __v := (Return)(__ret) + return __v +} + +// nvmlGpmQueryDeviceSupport function as declared in nvml/nvml.h +func nvmlGpmQueryDeviceSupport(Device Device, GpmSupport *GpmSupport) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cGpmSupport, _ := (*C.nvmlGpmSupport_t)(unsafe.Pointer(GpmSupport)), cgoAllocsUnknown + __ret := C.nvmlGpmQueryDeviceSupport(cDevice, cGpmSupport) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceCcuGetStreamState function as declared in nvml/nvml.h +func nvmlDeviceCcuGetStreamState(Device Device, State *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cState, _ := (*C.uint)(unsafe.Pointer(State)), cgoAllocsUnknown + __ret := C.nvmlDeviceCcuGetStreamState(cDevice, cState) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceCcuSetStreamState function as declared in nvml/nvml.h +func nvmlDeviceCcuSetStreamState(Device Device, State uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cState, _ := (C.uint)(State), cgoAllocsUnknown + __ret := C.nvmlDeviceCcuSetStreamState(cDevice, cState) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceSetNvLinkDeviceLowPowerThreshold function as declared in nvml/nvml.h +func nvmlDeviceSetNvLinkDeviceLowPowerThreshold(Device Device, Info *NvLinkPowerThres) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cInfo, _ := (*C.nvmlNvLinkPowerThres_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlDeviceSetNvLinkDeviceLowPowerThreshold(cDevice, cInfo) + __v := (Return)(__ret) + return __v 
+} + +// nvmlInit_v1 function as declared in nvml/nvml.h +func nvmlInit_v1() Return { + __ret := C.nvmlInit() + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetCount_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetCount_v1(DeviceCount *uint32) Return { + cDeviceCount, _ := (*C.uint)(unsafe.Pointer(DeviceCount)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetCount(cDeviceCount) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetHandleByIndex_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetHandleByIndex_v1(Index uint32, Device *Device) Return { + cIndex, _ := (C.uint)(Index), cgoAllocsUnknown + cDevice, _ := (*C.nvmlDevice_t)(unsafe.Pointer(Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetHandleByIndex(cIndex, cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetHandleByPciBusId_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetHandleByPciBusId_v1(PciBusId string, Device *Device) Return { + cPciBusId, _ := unpackPCharString(PciBusId) + cDevice, _ := (*C.nvmlDevice_t)(unsafe.Pointer(Device)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetHandleByPciBusId(cPciBusId, cDevice) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPciInfo_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetPciInfo_v1(Device Device, Pci *PciInfo) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPci, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(Pci)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPciInfo(cDevice, cPci) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetPciInfo_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetPciInfo_v2(Device Device, Pci *PciInfo) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPci, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(Pci)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetPciInfo_v2(cDevice, cPci) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetNvLinkRemotePciInfo_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetNvLinkRemotePciInfo_v1(Device Device, Link uint32, Pci *PciInfo) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cLink, _ := (C.uint)(Link), cgoAllocsUnknown + cPci, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(Pci)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetNvLinkRemotePciInfo(cDevice, cLink, cPci) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGridLicensableFeatures_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetGridLicensableFeatures_v1(Device Device, PGridLicensableFeatures *GridLicensableFeatures) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPGridLicensableFeatures, _ := (*C.nvmlGridLicensableFeatures_t)(unsafe.Pointer(PGridLicensableFeatures)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGridLicensableFeatures(cDevice, cPGridLicensableFeatures) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGridLicensableFeatures_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetGridLicensableFeatures_v2(Device Device, PGridLicensableFeatures *GridLicensableFeatures) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPGridLicensableFeatures, _ := (*C.nvmlGridLicensableFeatures_t)(unsafe.Pointer(PGridLicensableFeatures)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGridLicensableFeatures_v2(cDevice, cPGridLicensableFeatures) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGridLicensableFeatures_v3 function as declared in nvml/nvml.h 
+func nvmlDeviceGetGridLicensableFeatures_v3(Device Device, PGridLicensableFeatures *GridLicensableFeatures) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cPGridLicensableFeatures, _ := (*C.nvmlGridLicensableFeatures_t)(unsafe.Pointer(PGridLicensableFeatures)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGridLicensableFeatures_v3(cDevice, cPGridLicensableFeatures) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceRemoveGpu_v1 function as declared in nvml/nvml.h +func nvmlDeviceRemoveGpu_v1(PciInfo *PciInfo) Return { + cPciInfo, _ := (*C.nvmlPciInfo_t)(unsafe.Pointer(PciInfo)), cgoAllocsUnknown + __ret := C.nvmlDeviceRemoveGpu(cPciInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlEventSetWait_v1 function as declared in nvml/nvml.h +func nvmlEventSetWait_v1(Set EventSet, Data *EventData, Timeoutms uint32) Return { + cSet, _ := *(*C.nvmlEventSet_t)(unsafe.Pointer(&Set)), cgoAllocsUnknown + cData, _ := (*C.nvmlEventData_t)(unsafe.Pointer(Data)), cgoAllocsUnknown + cTimeoutms, _ := (C.uint)(Timeoutms), cgoAllocsUnknown + __ret := C.nvmlEventSetWait(cSet, cData, cTimeoutms) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetAttributes_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetAttributes_v1(Device Device, Attributes *DeviceAttributes) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cAttributes, _ := (*C.nvmlDeviceAttributes_t)(unsafe.Pointer(Attributes)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetAttributes(cDevice, cAttributes) + __v := (Return)(__ret) + return __v +} + +// nvmlComputeInstanceGetInfo_v1 function as declared in nvml/nvml.h +func nvmlComputeInstanceGetInfo_v1(ComputeInstance ComputeInstance, Info *ComputeInstanceInfo) Return { + cComputeInstance, _ := *(*C.nvmlComputeInstance_t)(unsafe.Pointer(&ComputeInstance)), cgoAllocsUnknown + cInfo, _ := (*C.nvmlComputeInstanceInfo_t)(unsafe.Pointer(Info)), cgoAllocsUnknown + __ret := C.nvmlComputeInstanceGetInfo(cComputeInstance, cInfo) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetComputeRunningProcesses_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetComputeRunningProcesses_v1(Device Device, InfoCount *uint32, Infos *ProcessInfo_v1) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_v1_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetComputeRunningProcesses(cDevice, cInfoCount, cInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetComputeRunningProcesses_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetComputeRunningProcesses_v2(Device Device, InfoCount *uint32, Infos *ProcessInfo_v2) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_v2_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetComputeRunningProcesses_v2(cDevice, cInfoCount, cInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGraphicsRunningProcesses_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetGraphicsRunningProcesses_v1(Device Device, InfoCount *uint32, Infos *ProcessInfo_v1) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := 
(*C.nvmlProcessInfo_v1_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGraphicsRunningProcesses(cDevice, cInfoCount, cInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGraphicsRunningProcesses_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetGraphicsRunningProcesses_v2(Device Device, InfoCount *uint32, Infos *ProcessInfo_v2) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_v2_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGraphicsRunningProcesses_v2(cDevice, cInfoCount, cInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMPSComputeRunningProcesses_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetMPSComputeRunningProcesses_v1(Device Device, InfoCount *uint32, Infos *ProcessInfo_v1) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_v1_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMPSComputeRunningProcesses(cDevice, cInfoCount, cInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetMPSComputeRunningProcesses_v2 function as declared in nvml/nvml.h +func nvmlDeviceGetMPSComputeRunningProcesses_v2(Device Device, InfoCount *uint32, Infos *ProcessInfo_v2) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cInfoCount, _ := (*C.uint)(unsafe.Pointer(InfoCount)), cgoAllocsUnknown + cInfos, _ := (*C.nvmlProcessInfo_v2_t)(unsafe.Pointer(Infos)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetMPSComputeRunningProcesses_v2(cDevice, cInfoCount, cInfos) + __v := (Return)(__ret) + return __v +} + +// nvmlDeviceGetGpuInstancePossiblePlacements_v1 function as declared in nvml/nvml.h +func nvmlDeviceGetGpuInstancePossiblePlacements_v1(Device Device, ProfileId uint32, Placements *GpuInstancePlacement, Count *uint32) Return { + cDevice, _ := *(*C.nvmlDevice_t)(unsafe.Pointer(&Device)), cgoAllocsUnknown + cProfileId, _ := (C.uint)(ProfileId), cgoAllocsUnknown + cPlacements, _ := (*C.nvmlGpuInstancePlacement_t)(unsafe.Pointer(Placements)), cgoAllocsUnknown + cCount, _ := (*C.uint)(unsafe.Pointer(Count)), cgoAllocsUnknown + __ret := C.nvmlDeviceGetGpuInstancePossiblePlacements(cDevice, cProfileId, cPlacements, cCount) + __v := (Return)(__ret) + return __v +} + +// nvmlVgpuInstanceGetLicenseInfo_v1 function as declared in nvml/nvml.h +func nvmlVgpuInstanceGetLicenseInfo_v1(VgpuInstance VgpuInstance, LicenseInfo *VgpuLicenseInfo) Return { + cVgpuInstance, _ := (C.nvmlVgpuInstance_t)(VgpuInstance), cgoAllocsUnknown + cLicenseInfo, _ := (*C.nvmlVgpuLicenseInfo_t)(unsafe.Pointer(LicenseInfo)), cgoAllocsUnknown + __ret := C.nvmlVgpuInstanceGetLicenseInfo(cVgpuInstance, cLicenseInfo) + __v := (Return)(__ret) + return __v +} diff --git a/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml.h b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.h similarity index 76% rename from vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml.h rename to vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.h index 46e90d1..8c71ff8 100644 --- a/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml.h +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/nvml.h @@ -1,5 +1,7 @@ +/*** NVML VERSION: 12.0.76 ***/ +/*** From 
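// Illustrative sketch (not part of the vendored bindings): the versioned wrappers
// above (nvmlInit_v1, nvmlDeviceGetCount_v1, nvmlDeviceGetHandleByIndex_v1, ...)
// are low-level cgo plumbing generated for github.com/NVIDIA/go-nvml; callers
// normally go through the high-level API instead, roughly as below. The error
// handling details here are assumptions made for this sketch.
package main

import (
	"fmt"
	"log"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
)

func main() {
	// Initialize NVML once per process; Shutdown releases it on exit.
	if ret := nvml.Init(); ret != nvml.SUCCESS {
		log.Fatalf("nvml.Init failed: %s", nvml.ErrorString(ret))
	}
	defer nvml.Shutdown()

	count, ret := nvml.DeviceGetCount()
	if ret != nvml.SUCCESS {
		log.Fatalf("DeviceGetCount failed: %s", nvml.ErrorString(ret))
	}
	for i := 0; i < count; i++ {
		device, ret := nvml.DeviceGetHandleByIndex(i)
		if ret != nvml.SUCCESS {
			log.Fatalf("DeviceGetHandleByIndex(%d) failed: %s", i, nvml.ErrorString(ret))
		}
		uuid, _ := device.GetUUID()
		fmt.Printf("GPU %d: %s\n", i, uuid)
	}
}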
https://api.anaconda.org/download/nvidia/cuda-nvml-dev/12.0.76/linux-64/cuda-nvml-dev-12.0.76-0.tar.bz2 ***/ /* - * Copyright 1993-2020 NVIDIA Corporation. All rights reserved. + * Copyright 1993-2022 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * @@ -52,7 +54,7 @@ Supported products: - Full Support - All Tesla products, starting with the Fermi architecture - All Quadro products, starting with the Fermi architecture - - All GRID products, starting with the Kepler architecture + - All vGPU Software products, starting with the Kepler architecture - Selected GeForce Titan products - Limited Support - All Geforce products, starting with the Fermi architecture @@ -103,18 +105,30 @@ extern "C" { * guard if you need to support older versions of the API */ #ifndef NVML_NO_UNVERSIONED_FUNC_DEFS - #define nvmlInit nvmlInit_v2 - #define nvmlDeviceGetPciInfo nvmlDeviceGetPciInfo_v3 - #define nvmlDeviceGetCount nvmlDeviceGetCount_v2 - #define nvmlDeviceGetHandleByIndex nvmlDeviceGetHandleByIndex_v2 - #define nvmlDeviceGetHandleByPciBusId nvmlDeviceGetHandleByPciBusId_v2 - #define nvmlDeviceGetNvLinkRemotePciInfo nvmlDeviceGetNvLinkRemotePciInfo_v2 - #define nvmlDeviceRemoveGpu nvmlDeviceRemoveGpu_v2 - #define nvmlDeviceGetGridLicensableFeatures nvmlDeviceGetGridLicensableFeatures_v3 - #define nvmlEventSetWait nvmlEventSetWait_v2 - #define nvmlDeviceGetAttributes nvmlDeviceGetAttributes_v2 + #define nvmlInit nvmlInit_v2 + #define nvmlDeviceGetPciInfo nvmlDeviceGetPciInfo_v3 + #define nvmlDeviceGetCount nvmlDeviceGetCount_v2 + #define nvmlDeviceGetHandleByIndex nvmlDeviceGetHandleByIndex_v2 + #define nvmlDeviceGetHandleByPciBusId nvmlDeviceGetHandleByPciBusId_v2 + #define nvmlDeviceGetNvLinkRemotePciInfo nvmlDeviceGetNvLinkRemotePciInfo_v2 + #define nvmlDeviceRemoveGpu nvmlDeviceRemoveGpu_v2 + #define nvmlDeviceGetGridLicensableFeatures nvmlDeviceGetGridLicensableFeatures_v4 + #define nvmlEventSetWait nvmlEventSetWait_v2 + #define nvmlDeviceGetAttributes nvmlDeviceGetAttributes_v2 + #define nvmlComputeInstanceGetInfo nvmlComputeInstanceGetInfo_v2 + #define nvmlDeviceGetComputeRunningProcesses nvmlDeviceGetComputeRunningProcesses_v3 + #define nvmlDeviceGetGraphicsRunningProcesses nvmlDeviceGetGraphicsRunningProcesses_v3 + #define nvmlDeviceGetMPSComputeRunningProcesses nvmlDeviceGetMPSComputeRunningProcesses_v3 + #define nvmlBlacklistDeviceInfo_t nvmlExcludedDeviceInfo_t + #define nvmlGetBlacklistDeviceCount nvmlGetExcludedDeviceCount + #define nvmlGetBlacklistDeviceInfoByIndex nvmlGetExcludedDeviceInfoByIndex + #define nvmlDeviceGetGpuInstancePossiblePlacements nvmlDeviceGetGpuInstancePossiblePlacements_v2 + #define nvmlVgpuInstanceGetLicenseInfo nvmlVgpuInstanceGetLicenseInfo_v2 #endif // #ifndef NVML_NO_UNVERSIONED_FUNC_DEFS +#define NVML_STRUCT_VERSION(data, ver) (unsigned int)(sizeof(nvml ## data ## _v ## ver ## _t) | \ + (ver << 24U)) + /***************************************************************************************************/ /** @defgroup nvmlDeviceStructs Device Structs * @{ @@ -129,7 +143,10 @@ extern "C" { */ #define NVML_VALUE_NOT_AVAILABLE (-1) -typedef struct nvmlDevice_st* nvmlDevice_t; +typedef struct +{ + struct nvmlDevice_st* handle; +} nvmlDevice_t; /** * Buffer size guaranteed to be large enough for pci bus id @@ -200,15 +217,34 @@ typedef struct nvmlUtilization_st } nvmlUtilization_t; /** - * Memory allocation information for a device. + * Memory allocation information for a device (v1). 
+ * The total amount is equal to the sum of the amounts of free and used memory. */ typedef struct nvmlMemory_st { - unsigned long long total; //!< Total installed FB memory (in bytes) - unsigned long long free; //!< Unallocated FB memory (in bytes) - unsigned long long used; //!< Allocated FB memory (in bytes). Note that the driver/GPU always sets aside a small amount of memory for bookkeeping + unsigned long long total; //!< Total physical device memory (in bytes) + unsigned long long free; //!< Unallocated device memory (in bytes) + unsigned long long used; //!< Sum of Reserved and Allocated device memory (in bytes). + //!< Note that the driver/GPU always sets aside a small amount of memory for bookkeeping } nvmlMemory_t; +/** + * Memory allocation information for a device (v2). + * + * Version 2 adds versioning for the struct and the amount of system-reserved memory as an output. + * @note The \ref nvmlMemory_v2_t.used amount also includes the \ref nvmlMemory_v2_t.reserved amount. + */ +typedef struct nvmlMemory_v2_st +{ + unsigned int version; //!< Structure format version (must be 2) + unsigned long long total; //!< Total physical device memory (in bytes) + unsigned long long reserved; //!< Device memory (in bytes) reserved for system use (driver or firmware) + unsigned long long free; //!< Unallocated device memory (in bytes) + unsigned long long used; //!< Allocated device memory (in bytes). Note that the driver/GPU always sets aside a small amount of memory for bookkeeping +} nvmlMemory_v2_t; + +#define nvmlMemory_v2 NVML_STRUCT_VERSION(Memory, 2) + /** * BAR1 Memory allocation Information for a device */ @@ -219,15 +255,47 @@ typedef struct nvmlBAR1Memory_st unsigned long long bar1Used; //!< Allocated Used Memory (in bytes) }nvmlBAR1Memory_t; +/** + * Information about running compute processes on the GPU, legacy version + * for older versions of the API. + */ +typedef struct nvmlProcessInfo_v1_st +{ + unsigned int pid; //!< Process ID + unsigned long long usedGpuMemory; //!< Amount of used GPU memory in bytes. + //! Under WDDM, \ref NVML_VALUE_NOT_AVAILABLE is always reported + //! because Windows KMD manages all the memory and not the NVIDIA driver +} nvmlProcessInfo_v1_t; + +/** + * Information about running compute processes on the GPU + */ +typedef struct nvmlProcessInfo_v2_st +{ + unsigned int pid; //!< Process ID + unsigned long long usedGpuMemory; //!< Amount of used GPU memory in bytes. + //! Under WDDM, \ref NVML_VALUE_NOT_AVAILABLE is always reported + //! because Windows KMD manages all the memory and not the NVIDIA driver + unsigned int gpuInstanceId; //!< If MIG is enabled, stores a valid GPU instance ID. gpuInstanceId is set to + // 0xFFFFFFFF otherwise. + unsigned int computeInstanceId; //!< If MIG is enabled, stores a valid compute instance ID. computeInstanceId is set to + // 0xFFFFFFFF otherwise. +} nvmlProcessInfo_v2_t; + /** * Information about running compute processes on the GPU + * Version 2 adds versioning for the struct */ typedef struct nvmlProcessInfo_st { - unsigned int pid; //!< Process ID - unsigned long long usedGpuMemory; //!< Amount of used GPU memory in bytes. - //! Under WDDM, \ref NVML_VALUE_NOT_AVAILABLE is always reported - //! because Windows KMD manages all the memory and not the NVIDIA driver + unsigned int pid; //!< Process ID + unsigned long long usedGpuMemory; //!< Amount of used GPU memory in bytes. + //! Under WDDM, \ref NVML_VALUE_NOT_AVAILABLE is always reported + //! 
because Windows KMD manages all the memory and not the NVIDIA driver + unsigned int gpuInstanceId; //!< If MIG is enabled, stores a valid GPU instance ID. gpuInstanceId is set to + // 0xFFFFFFFF otherwise. + unsigned int computeInstanceId; //!< If MIG is enabled, stores a valid compute instance ID. computeInstanceId is set to + // 0xFFFFFFFF otherwise. } nvmlProcessInfo_t; typedef struct nvmlDeviceAttributes_st @@ -270,7 +338,7 @@ typedef enum nvmlBridgeChipType_enum /** * Maximum number of NvLink links supported */ -#define NVML_NVLINK_MAX_LINKS 12 +#define NVML_NVLINK_MAX_LINKS 18 /** * Enum to represent the NvLink utilization counter packet units @@ -338,11 +406,23 @@ typedef enum nvmlNvLinkErrorCounter_enum NVML_NVLINK_ERROR_DL_RECOVERY = 1, // Data link transmit recovery error counter NVML_NVLINK_ERROR_DL_CRC_FLIT = 2, // Data link receive flow control digit CRC error counter NVML_NVLINK_ERROR_DL_CRC_DATA = 3, // Data link receive data CRC error counter + NVML_NVLINK_ERROR_DL_ECC_DATA = 4, // Data link receive data ECC error counter // this must be last NVML_NVLINK_ERROR_COUNT } nvmlNvLinkErrorCounter_t; +/** + * Enum to represent NvLink's remote device type + */ +typedef enum nvmlIntNvLinkDeviceType_enum +{ + NVML_NVLINK_DEVICE_TYPE_GPU = 0x00, + NVML_NVLINK_DEVICE_TYPE_IBMNPU = 0x01, + NVML_NVLINK_DEVICE_TYPE_SWITCH = 0x02, + NVML_NVLINK_DEVICE_TYPE_UNKNOWN = 0xFF +} nvmlIntNvLinkDeviceType_t; + /** * Represents level relationships within a system between two GPUs * The enums are spaced to allow for future relationships @@ -354,7 +434,7 @@ typedef enum nvmlGpuLevel_enum NVML_TOPOLOGY_MULTIPLE = 20, // all devices that need not traverse a host bridge NVML_TOPOLOGY_HOSTBRIDGE = 30, // all devices that are connected to the same host bridge NVML_TOPOLOGY_NODE = 40, // all devices that are connected to the same NUMA node but possibly multiple host bridges - NVML_TOPOLOGY_SYSTEM = 50, // all devices in the system + NVML_TOPOLOGY_SYSTEM = 50 // all devices in the system // there is purposefully no COUNT here because of the need for spacing above } nvmlGpuTopologyLevel_t; @@ -473,7 +553,7 @@ typedef union nvmlValue_st typedef struct nvmlSample_st { unsigned long long timeStamp; //!< CPU Timestamp in microseconds - nvmlValue_t sampleValue; //!< Sample Value + nvmlValue_t sampleValue; //!< Sample Value }nvmlSample_t; /** @@ -504,6 +584,61 @@ typedef struct nvmlViolationTime_st unsigned long long violationTime; //!< violationTime in Nanoseconds }nvmlViolationTime_t; +#define NVML_MAX_THERMAL_SENSORS_PER_GPU 3 + +typedef enum +{ + NVML_THERMAL_TARGET_NONE = 0, + NVML_THERMAL_TARGET_GPU = 1, //!< GPU core temperature requires NvPhysicalGpuHandle + NVML_THERMAL_TARGET_MEMORY = 2, //!< GPU memory temperature requires NvPhysicalGpuHandle + NVML_THERMAL_TARGET_POWER_SUPPLY = 4, //!< GPU power supply temperature requires NvPhysicalGpuHandle + NVML_THERMAL_TARGET_BOARD = 8, //!< GPU board ambient temperature requires NvPhysicalGpuHandle + NVML_THERMAL_TARGET_VCD_BOARD = 9, //!< Visual Computing Device Board temperature requires NvVisualComputingDeviceHandle + NVML_THERMAL_TARGET_VCD_INLET = 10, //!< Visual Computing Device Inlet temperature requires NvVisualComputingDeviceHandle + NVML_THERMAL_TARGET_VCD_OUTLET = 11, //!< Visual Computing Device Outlet temperature requires NvVisualComputingDeviceHandle + + NVML_THERMAL_TARGET_ALL = 15, + NVML_THERMAL_TARGET_UNKNOWN = -1, +} nvmlThermalTarget_t; + +typedef enum +{ + NVML_THERMAL_CONTROLLER_NONE = 0, + NVML_THERMAL_CONTROLLER_GPU_INTERNAL, + 
NVML_THERMAL_CONTROLLER_ADM1032, + NVML_THERMAL_CONTROLLER_ADT7461, + NVML_THERMAL_CONTROLLER_MAX6649, + NVML_THERMAL_CONTROLLER_MAX1617, + NVML_THERMAL_CONTROLLER_LM99, + NVML_THERMAL_CONTROLLER_LM89, + NVML_THERMAL_CONTROLLER_LM64, + NVML_THERMAL_CONTROLLER_G781, + NVML_THERMAL_CONTROLLER_ADT7473, + NVML_THERMAL_CONTROLLER_SBMAX6649, + NVML_THERMAL_CONTROLLER_VBIOSEVT, + NVML_THERMAL_CONTROLLER_OS, + NVML_THERMAL_CONTROLLER_NVSYSCON_CANOAS, + NVML_THERMAL_CONTROLLER_NVSYSCON_E551, + NVML_THERMAL_CONTROLLER_MAX6649R, + NVML_THERMAL_CONTROLLER_ADT7473S, + NVML_THERMAL_CONTROLLER_UNKNOWN = -1, +} nvmlThermalController_t; + +typedef struct { + nvmlThermalController_t controller; + int defaultMinTemp; + int defaultMaxTemp; + int currentTemp; + nvmlThermalTarget_t target; +} nvmlGpuThermalSettingsSensor_t; + +typedef struct +{ + unsigned int count; + nvmlGpuThermalSettingsSensor_t sensor[NVML_MAX_THERMAL_SENSORS_PER_GPU]; + +} nvmlGpuThermalSettings_t; + /** @} */ /***************************************************************************************************/ @@ -531,13 +666,24 @@ typedef enum nvmlEnableState_enum * */ typedef enum nvmlBrandType_enum { - NVML_BRAND_UNKNOWN = 0, - NVML_BRAND_QUADRO = 1, - NVML_BRAND_TESLA = 2, - NVML_BRAND_NVS = 3, - NVML_BRAND_GRID = 4, - NVML_BRAND_GEFORCE = 5, - NVML_BRAND_TITAN = 6, + NVML_BRAND_UNKNOWN = 0, + NVML_BRAND_QUADRO = 1, + NVML_BRAND_TESLA = 2, + NVML_BRAND_NVS = 3, + NVML_BRAND_GRID = 4, // Deprecated from API reporting. Keeping definition for backward compatibility. + NVML_BRAND_GEFORCE = 5, + NVML_BRAND_TITAN = 6, + NVML_BRAND_NVIDIA_VAPPS = 7, // NVIDIA Virtual Applications + NVML_BRAND_NVIDIA_VPC = 8, // NVIDIA Virtual PC + NVML_BRAND_NVIDIA_VCS = 9, // NVIDIA Virtual Compute Server + NVML_BRAND_NVIDIA_VWS = 10, // NVIDIA RTX Virtual Workstation + NVML_BRAND_NVIDIA_CLOUD_GAMING = 11, // NVIDIA Cloud Gaming + NVML_BRAND_NVIDIA_VGAMING = NVML_BRAND_NVIDIA_CLOUD_GAMING, // Deprecated from API reporting. Keeping definition for backward compatibility. + NVML_BRAND_QUADRO_RTX = 12, + NVML_BRAND_NVIDIA_RTX = 13, + NVML_BRAND_NVIDIA = 14, + NVML_BRAND_GEFORCE_RTX = 15, // Unused + NVML_BRAND_TITAN_RTX = 16, // Unused // Keep this last NVML_BRAND_COUNT @@ -548,11 +694,20 @@ typedef enum nvmlBrandType_enum */ typedef enum nvmlTemperatureThresholds_enum { - NVML_TEMPERATURE_THRESHOLD_SHUTDOWN = 0, // Temperature at which the GPU will shut down - // for HW protection - NVML_TEMPERATURE_THRESHOLD_SLOWDOWN = 1, // Temperature at which the GPU will begin HW slowdown - NVML_TEMPERATURE_THRESHOLD_MEM_MAX = 2, // Memory Temperature at which the GPU will begin SW slowdown - NVML_TEMPERATURE_THRESHOLD_GPU_MAX = 3, // GPU Temperature at which the GPU can be throttled below base clock + NVML_TEMPERATURE_THRESHOLD_SHUTDOWN = 0, // Temperature at which the GPU will + // shut down for HW protection + NVML_TEMPERATURE_THRESHOLD_SLOWDOWN = 1, // Temperature at which the GPU will + // begin HW slowdown + NVML_TEMPERATURE_THRESHOLD_MEM_MAX = 2, // Memory Temperature at which the GPU will + // begin SW slowdown + NVML_TEMPERATURE_THRESHOLD_GPU_MAX = 3, // GPU Temperature at which the GPU + // can be throttled below base clock + NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MIN = 4, // Minimum GPU Temperature that can be + // set as acoustic threshold + NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_CURR = 5, // Current temperature that is set as + // acoustic threshold. + NVML_TEMPERATURE_THRESHOLD_ACOUSTIC_MAX = 6, // Maximum GPU temperature that can be + // set as acoustic threshold. 
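// Illustrative sketch (not part of the vendored header): reading the GPU core
// temperature and the slowdown/shutdown thresholds described above through the
// high-level go-nvml API. The reportThermals name and package name are
// assumptions for this sketch; the newer acoustic thresholds may return
// ERROR_NOT_SUPPORTED on older drivers or GPUs.
package gpuinfo

import (
	"fmt"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
)

func reportThermals(device nvml.Device) {
	if temp, ret := device.GetTemperature(nvml.TEMPERATURE_GPU); ret == nvml.SUCCESS {
		fmt.Printf("core temperature: %d C\n", temp)
	}
	if slow, ret := device.GetTemperatureThreshold(nvml.TEMPERATURE_THRESHOLD_SLOWDOWN); ret == nvml.SUCCESS {
		fmt.Printf("HW slowdown threshold: %d C\n", slow)
	}
	if shut, ret := device.GetTemperatureThreshold(nvml.TEMPERATURE_THRESHOLD_SHUTDOWN); ret == nvml.SUCCESS {
		fmt.Printf("HW shutdown threshold: %d C\n", shut)
	}
}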
// Keep this last NVML_TEMPERATURE_THRESHOLD_COUNT } nvmlTemperatureThresholds_t; @@ -586,6 +741,46 @@ typedef enum nvmlComputeMode_enum NVML_COMPUTEMODE_COUNT } nvmlComputeMode_t; +/** + * Max Clock Monitors available + */ +#define MAX_CLK_DOMAINS 32 + +/** + * Clock Monitor error types + */ +typedef struct nvmlClkMonFaultInfo_struct { + /** + * The Domain which faulted + */ + unsigned int clkApiDomain; + + /** + * Faults Information + */ + unsigned int clkDomainFaultMask; +} nvmlClkMonFaultInfo_t; + +/** + * Clock Monitor Status + */ +typedef struct nvmlClkMonStatus_status { + /** + * Fault status Indicator + */ + unsigned int bGlobalStatus; + + /** + * Total faulted domain numbers + */ + unsigned int clkMonListSize; + + /** + * The fault Information structure + */ + nvmlClkMonFaultInfo_t clkMonList[MAX_CLK_DOMAINS]; +} nvmlClkMonStatus_t; + /** * ECC bit types. * @@ -686,12 +881,15 @@ typedef enum nvmlClockId_enum * * Windows only. */ + typedef enum nvmlDriverModel_enum { - NVML_DRIVER_WDDM = 0, //!< WDDM driver model -- GPU treated as a display device - NVML_DRIVER_WDM = 1 //!< WDM (TCC) model (recommended) -- GPU treated as a generic device + NVML_DRIVER_WDDM = 0, //!< WDDM driver model -- GPU treated as a display device + NVML_DRIVER_WDM = 1 //!< WDM (TCC) model (recommended) -- GPU treated as a generic device } nvmlDriverModel_t; +#define NVML_MAX_GPU_PERF_PSTATES 16 + /** * Allowed PStates. */ @@ -753,31 +951,34 @@ typedef enum nvmlInforomObject_enum typedef enum nvmlReturn_enum { // cppcheck-suppress * - NVML_SUCCESS = 0, //!< The operation was successful - NVML_ERROR_UNINITIALIZED = 1, //!< NVML was not first initialized with nvmlInit() - NVML_ERROR_INVALID_ARGUMENT = 2, //!< A supplied argument is invalid - NVML_ERROR_NOT_SUPPORTED = 3, //!< The requested operation is not available on target device - NVML_ERROR_NO_PERMISSION = 4, //!< The current user does not have permission for operation - NVML_ERROR_ALREADY_INITIALIZED = 5, //!< Deprecated: Multiple initializations are now allowed through ref counting - NVML_ERROR_NOT_FOUND = 6, //!< A query to find an object was unsuccessful - NVML_ERROR_INSUFFICIENT_SIZE = 7, //!< An input argument is not large enough - NVML_ERROR_INSUFFICIENT_POWER = 8, //!< A device's external power cables are not properly attached - NVML_ERROR_DRIVER_NOT_LOADED = 9, //!< NVIDIA driver is not loaded - NVML_ERROR_TIMEOUT = 10, //!< User provided timeout passed - NVML_ERROR_IRQ_ISSUE = 11, //!< NVIDIA Kernel detected an interrupt issue with a GPU - NVML_ERROR_LIBRARY_NOT_FOUND = 12, //!< NVML Shared Library couldn't be found or loaded - NVML_ERROR_FUNCTION_NOT_FOUND = 13, //!< Local version of NVML doesn't implement this function - NVML_ERROR_CORRUPTED_INFOROM = 14, //!< infoROM is corrupted - NVML_ERROR_GPU_IS_LOST = 15, //!< The GPU has fallen off the bus or has otherwise become inaccessible - NVML_ERROR_RESET_REQUIRED = 16, //!< The GPU requires a reset before it can be used again - NVML_ERROR_OPERATING_SYSTEM = 17, //!< The GPU control device has been blocked by the operating system/cgroups - NVML_ERROR_LIB_RM_VERSION_MISMATCH = 18, //!< RM detects a driver/library version mismatch - NVML_ERROR_IN_USE = 19, //!< An operation cannot be performed because the GPU is currently in use - NVML_ERROR_MEMORY = 20, //!< Insufficient memory - NVML_ERROR_NO_DATA = 21, //!< No data - NVML_ERROR_VGPU_ECC_NOT_SUPPORTED = 22, //!< The requested vgpu operation is not available on target device, becasue ECC is enabled - NVML_ERROR_INSUFFICIENT_RESOURCES = 23, //!< Ran 
out of critical resources, other than memory - NVML_ERROR_UNKNOWN = 999 //!< An internal driver error occurred + NVML_SUCCESS = 0, //!< The operation was successful + NVML_ERROR_UNINITIALIZED = 1, //!< NVML was not first initialized with nvmlInit() + NVML_ERROR_INVALID_ARGUMENT = 2, //!< A supplied argument is invalid + NVML_ERROR_NOT_SUPPORTED = 3, //!< The requested operation is not available on target device + NVML_ERROR_NO_PERMISSION = 4, //!< The current user does not have permission for operation + NVML_ERROR_ALREADY_INITIALIZED = 5, //!< Deprecated: Multiple initializations are now allowed through ref counting + NVML_ERROR_NOT_FOUND = 6, //!< A query to find an object was unsuccessful + NVML_ERROR_INSUFFICIENT_SIZE = 7, //!< An input argument is not large enough + NVML_ERROR_INSUFFICIENT_POWER = 8, //!< A device's external power cables are not properly attached + NVML_ERROR_DRIVER_NOT_LOADED = 9, //!< NVIDIA driver is not loaded + NVML_ERROR_TIMEOUT = 10, //!< User provided timeout passed + NVML_ERROR_IRQ_ISSUE = 11, //!< NVIDIA Kernel detected an interrupt issue with a GPU + NVML_ERROR_LIBRARY_NOT_FOUND = 12, //!< NVML Shared Library couldn't be found or loaded + NVML_ERROR_FUNCTION_NOT_FOUND = 13, //!< Local version of NVML doesn't implement this function + NVML_ERROR_CORRUPTED_INFOROM = 14, //!< infoROM is corrupted + NVML_ERROR_GPU_IS_LOST = 15, //!< The GPU has fallen off the bus or has otherwise become inaccessible + NVML_ERROR_RESET_REQUIRED = 16, //!< The GPU requires a reset before it can be used again + NVML_ERROR_OPERATING_SYSTEM = 17, //!< The GPU control device has been blocked by the operating system/cgroups + NVML_ERROR_LIB_RM_VERSION_MISMATCH = 18, //!< RM detects a driver/library version mismatch + NVML_ERROR_IN_USE = 19, //!< An operation cannot be performed because the GPU is currently in use + NVML_ERROR_MEMORY = 20, //!< Insufficient memory + NVML_ERROR_NO_DATA = 21, //!< No data + NVML_ERROR_VGPU_ECC_NOT_SUPPORTED = 22, //!< The requested vgpu operation is not available on target device, becasue ECC is enabled + NVML_ERROR_INSUFFICIENT_RESOURCES = 23, //!< Ran out of critical resources, other than memory + NVML_ERROR_FREQ_NOT_SUPPORTED = 24, //!< Ran out of critical resources, other than memory + NVML_ERROR_ARGUMENT_VERSION_MISMATCH = 25, //!< The provided version is invalid/unsupported + NVML_ERROR_DEPRECATED = 26, //!< The requested functionality has been deprecated + NVML_ERROR_UNKNOWN = 999 //!< An internal driver error occurred } nvmlReturn_t; /** @@ -826,11 +1027,11 @@ typedef enum nvmlRestrictedAPI_enum /** @} */ /***************************************************************************************************/ -/** @addtogroup gridVirtual +/** @addtogroup virtualGPU * @{ */ /***************************************************************************************************/ -/** @defgroup nvmlGridEnums GRID Virtualization Enums +/** @defgroup nvmlVirtualGpuEnums vGPU Enums * @{ */ /***************************************************************************************************/ @@ -843,7 +1044,7 @@ typedef enum nvmlGpuVirtualizationMode { NVML_GPU_VIRTUALIZATION_MODE_PASSTHROUGH = 1, //!< Device is associated with GPU-Passthorugh NVML_GPU_VIRTUALIZATION_MODE_VGPU = 2, //!< Device is associated with vGPU inside virtual machine. 
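// Illustrative sketch (not part of the vendored header): the nvmlReturn_t codes
// above surface in Go as nvml.Return values. A small helper such as the
// hypothetical errorOf below is one way to fold them into idiomatic Go error
// handling; nvml.ErrorString maps a code to its human-readable message.
package gpuinfo

import (
	"fmt"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
)

// errorOf converts an nvml.Return into a Go error, treating SUCCESS as nil.
func errorOf(ret nvml.Return) error {
	if ret == nvml.SUCCESS {
		return nil
	}
	return fmt.Errorf("nvml: %s", nvml.ErrorString(ret))
}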
NVML_GPU_VIRTUALIZATION_MODE_HOST_VGPU = 3, //!< Device is associated with VGX hypervisor in vGPU mode - NVML_GPU_VIRTUALIZATION_MODE_HOST_VSGA = 4, //!< Device is associated with VGX hypervisor in vSGA mode + NVML_GPU_VIRTUALIZATION_MODE_HOST_VSGA = 4 //!< Device is associated with VGX hypervisor in vSGA mode } nvmlGpuVirtualizationMode_t; /** @@ -860,31 +1061,82 @@ typedef enum nvmlHostVgpuMode_enum */ typedef enum nvmlVgpuVmIdType { NVML_VGPU_VM_ID_DOMAIN_ID = 0, //!< VM ID represents DOMAIN ID - NVML_VGPU_VM_ID_UUID = 1, //!< VM ID represents UUID + NVML_VGPU_VM_ID_UUID = 1 //!< VM ID represents UUID } nvmlVgpuVmIdType_t; /** - * vGPU GUEST info state. + * vGPU GUEST info state */ typedef enum nvmlVgpuGuestInfoState_enum { NVML_VGPU_INSTANCE_GUEST_INFO_STATE_UNINITIALIZED = 0, //!< Guest-dependent fields uninitialized - NVML_VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED = 1, //!< Guest-dependent fields initialized + NVML_VGPU_INSTANCE_GUEST_INFO_STATE_INITIALIZED = 1 //!< Guest-dependent fields initialized } nvmlVgpuGuestInfoState_t; /** - * GRID license feature code + * vGPU software licensable features */ typedef enum { - NVML_GRID_LICENSE_FEATURE_CODE_VGPU = 1, //!< Virtual GPU - NVML_GRID_LICENSE_FEATURE_CODE_VWORKSTATION = 2 //!< Virtual Workstation + NVML_GRID_LICENSE_FEATURE_CODE_UNKNOWN = 0, //!< Unknown + NVML_GRID_LICENSE_FEATURE_CODE_VGPU = 1, //!< Virtual GPU + NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX = 2, //!< Nvidia RTX + NVML_GRID_LICENSE_FEATURE_CODE_VWORKSTATION = NVML_GRID_LICENSE_FEATURE_CODE_NVIDIA_RTX, //!< Deprecated, do not use. + NVML_GRID_LICENSE_FEATURE_CODE_GAMING = 3, //!< Gaming + NVML_GRID_LICENSE_FEATURE_CODE_COMPUTE = 4 //!< Compute } nvmlGridLicenseFeatureCode_t; +/** + * Status codes for license expiry + */ +#define NVML_GRID_LICENSE_EXPIRY_NOT_AVAILABLE 0 //!< Expiry information not available +#define NVML_GRID_LICENSE_EXPIRY_INVALID 1 //!< Invalid expiry or error fetching expiry +#define NVML_GRID_LICENSE_EXPIRY_VALID 2 //!< Valid expiry +#define NVML_GRID_LICENSE_EXPIRY_NOT_APPLICABLE 3 //!< Expiry not applicable +#define NVML_GRID_LICENSE_EXPIRY_PERMANENT 4 //!< Permanent expiry + +/** + * vGPU queryable capabilities + */ +typedef enum nvmlVgpuCapability_enum +{ + NVML_VGPU_CAP_NVLINK_P2P = 0, //!< P2P over NVLink is supported + NVML_VGPU_CAP_GPUDIRECT = 1, //!< GPUDirect capability is supported + NVML_VGPU_CAP_MULTI_VGPU_EXCLUSIVE = 2, //!< vGPU profile cannot be mixed with other vGPU profiles in same VM + NVML_VGPU_CAP_EXCLUSIVE_TYPE = 3, //!< vGPU profile cannot run on a GPU alongside other profiles of different type + NVML_VGPU_CAP_EXCLUSIVE_SIZE = 4, //!< vGPU profile cannot run on a GPU alongside other profiles of different size + // Keep this last + NVML_VGPU_CAP_COUNT +} nvmlVgpuCapability_t; + + +/** +* vGPU driver queryable capabilities +*/ +typedef enum nvmlVgpuDriverCapability_enum +{ + NVML_VGPU_DRIVER_CAP_HETEROGENEOUS_MULTI_VGPU = 0, //!< Supports mixing of different vGPU profiles within one guest VM + // Keep this last + NVML_VGPU_DRIVER_CAP_COUNT +} nvmlVgpuDriverCapability_t; + + +/** +* Device vGPU queryable capabilities +*/ +typedef enum nvmlDeviceVgpuCapability_enum +{ + NVML_DEVICE_VGPU_CAP_FRACTIONAL_MULTI_VGPU = 0, //!< Fractional vGPU profiles on this GPU can be used in multi-vGPU configurations + NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_PROFILES = 1, //!< Supports concurrent execution of timesliced vGPU profiles of differing types + NVML_DEVICE_VGPU_CAP_HETEROGENEOUS_TIMESLICE_SIZES = 2, //!< Supports concurrent 
execution of timesliced vGPU profiles of differing framebuffer sizes + // Keep this last + NVML_DEVICE_VGPU_CAP_COUNT +} nvmlDeviceVgpuCapability_t; + /** @} */ /***************************************************************************************************/ -/** @defgroup nvmlVgpuConstants GRID Virtualization Constants +/** @defgroup nvmlVgpuConstants vGPU Constants * @{ */ /***************************************************************************************************/ @@ -898,6 +1150,10 @@ typedef enum { #define NVML_GRID_LICENSE_FEATURE_MAX_COUNT 3 +#define INVALID_GPU_INSTANCE_PROFILE_ID 0xFFFFFFFF + +#define INVALID_GPU_INSTANCE_ID 0xFFFFFFFF + /*! * Macros for vGPU instance's virtualization capabilities bitfield. */ @@ -915,7 +1171,7 @@ typedef enum { /** @} */ /***************************************************************************************************/ -/** @defgroup nvmlVgpuStructs GRID Virtualization Structs +/** @defgroup nvmlVgpuStructs vGPU Structs * @{ */ /***************************************************************************************************/ @@ -929,12 +1185,12 @@ typedef unsigned int nvmlVgpuInstance_t; */ typedef struct nvmlVgpuInstanceUtilizationSample_st { - nvmlVgpuInstance_t vgpuInstance; //!< vGPU Instance - unsigned long long timeStamp; //!< CPU Timestamp in microseconds - nvmlValue_t smUtil; //!< SM (3D/Compute) Util Value - nvmlValue_t memUtil; //!< Frame Buffer Memory Util Value - nvmlValue_t encUtil; //!< Encoder Util Value - nvmlValue_t decUtil; //!< Decoder Util Value + nvmlVgpuInstance_t vgpuInstance; //!< vGPU Instance + unsigned long long timeStamp; //!< CPU Timestamp in microseconds + nvmlValue_t smUtil; //!< SM (3D/Compute) Util Value + nvmlValue_t memUtil; //!< Frame Buffer Memory Util Value + nvmlValue_t encUtil; //!< Encoder Util Value + nvmlValue_t decUtil; //!< Decoder Util Value } nvmlVgpuInstanceUtilizationSample_t; /** @@ -942,51 +1198,215 @@ typedef struct nvmlVgpuInstanceUtilizationSample_st */ typedef struct nvmlVgpuProcessUtilizationSample_st { - nvmlVgpuInstance_t vgpuInstance; //!< vGPU Instance - unsigned int pid; //!< PID of process running within the vGPU VM - char processName[NVML_VGPU_NAME_BUFFER_SIZE]; //!< Name of process running within the vGPU VM - unsigned long long timeStamp; //!< CPU Timestamp in microseconds - unsigned int smUtil; //!< SM (3D/Compute) Util Value - unsigned int memUtil; //!< Frame Buffer Memory Util Value - unsigned int encUtil; //!< Encoder Util Value - unsigned int decUtil; //!< Decoder Util Value + nvmlVgpuInstance_t vgpuInstance; //!< vGPU Instance + unsigned int pid; //!< PID of process running within the vGPU VM + char processName[NVML_VGPU_NAME_BUFFER_SIZE]; //!< Name of process running within the vGPU VM + unsigned long long timeStamp; //!< CPU Timestamp in microseconds + unsigned int smUtil; //!< SM (3D/Compute) Util Value + unsigned int memUtil; //!< Frame Buffer Memory Util Value + unsigned int encUtil; //!< Encoder Util Value + unsigned int decUtil; //!< Decoder Util Value } nvmlVgpuProcessUtilizationSample_t; +/** + * vGPU scheduler policies + */ +#define NVML_VGPU_SCHEDULER_POLICY_UNKNOWN 0 +#define NVML_VGPU_SCHEDULER_POLICY_BEST_EFFORT 1 +#define NVML_VGPU_SCHEDULER_POLICY_EQUAL_SHARE 2 +#define NVML_VGPU_SCHEDULER_POLICY_FIXED_SHARE 3 + +#define NVML_SUPPORTED_VGPU_SCHEDULER_POLICY_COUNT 3 + +#define NVML_SCHEDULER_SW_MAX_LOG_ENTRIES 200 + +typedef struct { + unsigned int avgFactor; + unsigned int timeslice; +} nvmlVgpuSchedulerParamsVgpuSchedDataWithARR_t; + +typedef 
struct { + unsigned int timeslice; +} nvmlVgpuSchedulerParamsVgpuSchedData_t; + +/** + * Union to represent the vGPU Scheduler Parameters + */ +typedef union +{ + nvmlVgpuSchedulerParamsVgpuSchedDataWithARR_t vgpuSchedDataWithARR; + + nvmlVgpuSchedulerParamsVgpuSchedData_t vgpuSchedData; + +} nvmlVgpuSchedulerParams_t; + +/** + * Structure to store the state and logs of a software runlist + */ +typedef struct nvmlVgpuSchedulerLogEntries_st +{ + unsigned long long timestamp; //!< Timestamp in ns when this software runlist was preeempted + unsigned long long timeRunTotal; //!< Total time in ns this software runlist has run + unsigned long long timeRun; //!< Time in ns this software runlist ran before preemption + unsigned int swRunlistId; //!< Software runlist Id + unsigned long long targetTimeSlice; //!< The actual timeslice after deduction + unsigned long long cumulativePreemptionTime; //!< Preemption time in ns for this SW runlist +} nvmlVgpuSchedulerLogEntry_t; + +/** + * Structure to store a vGPU software scheduler log + */ +typedef struct nvmlVgpuSchedulerLog_st +{ + unsigned int engineId; //!< Engine whose software runlist log entries are fetched + unsigned int schedulerPolicy; //!< Scheduler policy + unsigned int isEnabledARR; //!< Flag to check Adaptive Round Robin scheduler mode + nvmlVgpuSchedulerParams_t schedulerParams; + unsigned int entriesCount; //!< Count of log entries fetched + nvmlVgpuSchedulerLogEntry_t logEntries[NVML_SCHEDULER_SW_MAX_LOG_ENTRIES]; +} nvmlVgpuSchedulerLog_t; + +/** + * Structure to store the vGPU scheduler state + */ +typedef struct nvmlVgpuSchedulerGetState_st +{ + unsigned int schedulerPolicy; //!< Scheduler policy + unsigned int isEnabledARR; //!< Flag to check Adaptive Round Robin scheduler mode + nvmlVgpuSchedulerParams_t schedulerParams; +} nvmlVgpuSchedulerGetState_t; + +typedef struct { + unsigned int avgFactor; + unsigned int frequency; +} nvmlVgpuSchedulerSetParamsVgpuSchedDataWithARR_t; + +typedef struct { + unsigned int timeslice; +} nvmlVgpuSchedulerSetParamsVgpuSchedData_t; + +/** + * Union to represent the vGPU Scheduler set Parameters + */ +typedef union +{ + nvmlVgpuSchedulerSetParamsVgpuSchedDataWithARR_t vgpuSchedDataWithARR; + + nvmlVgpuSchedulerSetParamsVgpuSchedData_t vgpuSchedData; + +} nvmlVgpuSchedulerSetParams_t; + +/** + * Structure to set the vGPU scheduler state + */ +typedef struct nvmlVgpuSchedulerSetState_st +{ + unsigned int schedulerPolicy; //!< Scheduler policy + unsigned int enableARRMode; //!< Flag to enable/disable Adaptive Round Robin scheduler + nvmlVgpuSchedulerSetParams_t schedulerParams; +} nvmlVgpuSchedulerSetState_t; + +/** + * Structure to store the vGPU scheduler capabilities + */ +typedef struct nvmlVgpuSchedulerCapabilities_st +{ + unsigned int supportedSchedulers[NVML_SUPPORTED_VGPU_SCHEDULER_POLICY_COUNT]; //!< List the supported vGPU schedulers on the device + unsigned int maxTimeslice; //!< Maximum timeslice value in ns + unsigned int minTimeslice; //!< Minimum timeslice value in ns + unsigned int isArrModeSupported; //!< Flag to check Adaptive Round Robin mode enabled/disabled. 
+ unsigned int maxFrequencyForARR; //!< Maximum frequency for Adaptive Round Robin mode + unsigned int minFrequencyForARR; //!< Minimum frequency for Adaptive Round Robin mode + unsigned int maxAvgFactorForARR; //!< Maximum averaging factor for Adaptive Round Robin mode + unsigned int minAvgFactorForARR; //!< Minimum averaging factor for Adaptive Round Robin mode +} nvmlVgpuSchedulerCapabilities_t; + +/** + * Structure to store the vGPU license expiry details + */ +typedef struct nvmlVgpuLicenseExpiry_st +{ + unsigned int year; //!< Year of license expiry + unsigned short month; //!< Month of license expiry + unsigned short day; //!< Day of license expiry + unsigned short hour; //!< Hour of license expiry + unsigned short min; //!< Minutes of license expiry + unsigned short sec; //!< Seconds of license expiry + unsigned char status; //!< License expiry status +} nvmlVgpuLicenseExpiry_t; + +/** + * vGPU license state + */ +#define NVML_GRID_LICENSE_STATE_UNKNOWN 0 //!< Unknown state +#define NVML_GRID_LICENSE_STATE_UNINITIALIZED 1 //!< Uninitialized state +#define NVML_GRID_LICENSE_STATE_UNLICENSED_UNRESTRICTED 2 //!< Unlicensed unrestricted state +#define NVML_GRID_LICENSE_STATE_UNLICENSED_RESTRICTED 3 //!< Unlicensed restricted state +#define NVML_GRID_LICENSE_STATE_UNLICENSED 4 //!< Unlicensed state +#define NVML_GRID_LICENSE_STATE_LICENSED 5 //!< Licensed state + +typedef struct nvmlVgpuLicenseInfo_st +{ + unsigned char isLicensed; //!< License status + nvmlVgpuLicenseExpiry_t licenseExpiry; //!< License expiry information + unsigned int currentState; //!< Current license state +} nvmlVgpuLicenseInfo_t; + /** * Structure to store utilization value and process Id */ typedef struct nvmlProcessUtilizationSample_st { - unsigned int pid; //!< PID of process - unsigned long long timeStamp; //!< CPU Timestamp in microseconds - unsigned int smUtil; //!< SM (3D/Compute) Util Value - unsigned int memUtil; //!< Frame Buffer Memory Util Value - unsigned int encUtil; //!< Encoder Util Value - unsigned int decUtil; //!< Decoder Util Value + unsigned int pid; //!< PID of process + unsigned long long timeStamp; //!< CPU Timestamp in microseconds + unsigned int smUtil; //!< SM (3D/Compute) Util Value + unsigned int memUtil; //!< Frame Buffer Memory Util Value + unsigned int encUtil; //!< Encoder Util Value + unsigned int decUtil; //!< Decoder Util Value } nvmlProcessUtilizationSample_t; /** - * Structure containing GRID licensable feature information + * Structure to store license expiry date and time values + */ +typedef struct nvmlGridLicenseExpiry_st +{ + unsigned int year; //!< Year value of license expiry + unsigned short month; //!< Month value of license expiry + unsigned short day; //!< Day value of license expiry + unsigned short hour; //!< Hour value of license expiry + unsigned short min; //!< Minutes value of license expiry + unsigned short sec; //!< Seconds value of license expiry + unsigned char status; //!< License expiry status +} nvmlGridLicenseExpiry_t; + +/** + * Structure containing vGPU software licensable feature information */ typedef struct nvmlGridLicensableFeature_st { nvmlGridLicenseFeatureCode_t featureCode; //!< Licensed feature code unsigned int featureState; //!< Non-zero if feature is currently licensed, otherwise zero - char licenseInfo[NVML_GRID_LICENSE_BUFFER_SIZE]; - char productName[NVML_GRID_LICENSE_BUFFER_SIZE]; + char licenseInfo[NVML_GRID_LICENSE_BUFFER_SIZE]; //!< Deprecated. 
+ char productName[NVML_GRID_LICENSE_BUFFER_SIZE]; //!< Product name of feature unsigned int featureEnabled; //!< Non-zero if feature is enabled, otherwise zero + nvmlGridLicenseExpiry_t licenseExpiry; //!< License expiry structure containing date and time } nvmlGridLicensableFeature_t; /** - * Structure to store GRID licensable features + * Structure to store vGPU software licensable features */ typedef struct nvmlGridLicensableFeatures_st { - int isGridLicenseSupported; //!< Non-zero if GRID Software Licensing is supported on the system, otherwise zero + int isGridLicenseSupported; //!< Non-zero if vGPU Software Licensing is supported on the system, otherwise zero unsigned int licensableFeaturesCount; //!< Entries returned in \a gridLicensableFeatures array - nvmlGridLicensableFeature_t gridLicensableFeatures[NVML_GRID_LICENSE_FEATURE_MAX_COUNT]; //!< Array of GRID licensable features. + nvmlGridLicensableFeature_t gridLicensableFeatures[NVML_GRID_LICENSE_FEATURE_MAX_COUNT]; //!< Array of vGPU software licensable features. } nvmlGridLicensableFeatures_t; +/** + * GSP firmware + */ +#define NVML_GSP_FIRMWARE_VERSION_BUF_SIZE 0x40 + /** * Simplified chip architecture */ @@ -998,10 +1418,84 @@ typedef struct nvmlGridLicensableFeatures_st #define NVML_DEVICE_ARCH_AMPERE 7 // Devices based on the NVIDIA Ampere architecture +#define NVML_DEVICE_ARCH_ADA 8 // Devices based on the NVIDIA Ada architecture + +#define NVML_DEVICE_ARCH_HOPPER 9 // Devices based on the NVIDIA Hopper architecture + #define NVML_DEVICE_ARCH_UNKNOWN 0xffffffff // Anything else, presumably something newer typedef unsigned int nvmlDeviceArchitecture_t; +/** + * PCI bus types + */ +#define NVML_BUS_TYPE_UNKNOWN 0 +#define NVML_BUS_TYPE_PCI 1 +#define NVML_BUS_TYPE_PCIE 2 +#define NVML_BUS_TYPE_FPCI 3 +#define NVML_BUS_TYPE_AGP 4 + +typedef unsigned int nvmlBusType_t; + +/** + * Device Power Modes + */ + +/** + * Device Fan control policy + */ +#define NVML_FAN_POLICY_TEMPERATURE_CONTINOUS_SW 0 +#define NVML_FAN_POLICY_MANUAL 1 + +typedef unsigned int nvmlFanControlPolicy_t; + +/** + * Device Power Source + */ +#define NVML_POWER_SOURCE_AC 0x00000000 +#define NVML_POWER_SOURCE_BATTERY 0x00000001 + +typedef unsigned int nvmlPowerSource_t; + +/* + * Device PCIE link Max Speed + */ +#define NVML_PCIE_LINK_MAX_SPEED_INVALID 0x00000000 +#define NVML_PCIE_LINK_MAX_SPEED_2500MBPS 0x00000001 +#define NVML_PCIE_LINK_MAX_SPEED_5000MBPS 0x00000002 +#define NVML_PCIE_LINK_MAX_SPEED_8000MBPS 0x00000003 +#define NVML_PCIE_LINK_MAX_SPEED_16000MBPS 0x00000004 +#define NVML_PCIE_LINK_MAX_SPEED_32000MBPS 0x00000005 +#define NVML_PCIE_LINK_MAX_SPEED_64000MBPS 0x00000006 + +/* + * Adaptive clocking status + */ +#define NVML_ADAPTIVE_CLOCKING_INFO_STATUS_DISABLED 0x00000000 +#define NVML_ADAPTIVE_CLOCKING_INFO_STATUS_ENABLED 0x00000001 + +#define NVML_MAX_GPU_UTILIZATIONS 8 +typedef enum nvmlGpuUtilizationDomainId_t +{ + NVML_GPU_UTILIZATION_DOMAIN_GPU = 0, //!< Graphics engine domain + NVML_GPU_UTILIZATION_DOMAIN_FB = 1, //!< Frame buffer domain + NVML_GPU_UTILIZATION_DOMAIN_VID = 2, //!< Video engine domain + NVML_GPU_UTILIZATION_DOMAIN_BUS = 3, //!< Bus interface domain +} nvmlGpuUtilizationDomainId_t; + +typedef struct { + unsigned int bIsPresent; + unsigned int percentage; + unsigned int incThreshold; + unsigned int decThreshold; +} nvmlGpuDynamicPstatesInfoUtilization_t; + +typedef struct nvmlGpuDynamicPstatesInfo_st +{ + unsigned int flags; //!< Reserved for future use + nvmlGpuDynamicPstatesInfoUtilization_t 
utilization[NVML_MAX_GPU_UTILIZATIONS]; +} nvmlGpuDynamicPstatesInfo_t; + /** @} */ /** @} */ @@ -1229,7 +1723,50 @@ typedef unsigned int nvmlDeviceArchitecture_t; #define NVML_FI_DEV_REMAPPED_PENDING 144 //!< If any rows are pending remapping. 1=yes 0=no #define NVML_FI_DEV_REMAPPED_FAILURE 145 //!< If any rows failed to be remapped 1=yes 0=no -#define NVML_FI_MAX 146 //!< One greater than the largest field ID defined above +/** + * Remote device NVLink ID + * + * Link ID needs to be specified in the scopeId field in nvmlFieldValue_t. + */ +#define NVML_FI_DEV_NVLINK_REMOTE_NVLINK_ID 146 //!< Remote device NVLink ID + +/** + * NVSwitch: connected NVLink count + */ +#define NVML_FI_DEV_NVSWITCH_CONNECTED_LINK_COUNT 147 //!< Number of NVLinks connected to NVSwitch + +/* NvLink ECC Data Error Counters + * + * Lane ID needs to be specified in the scopeId field in nvmlFieldValue_t. + * + */ +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L0 148 //!< NVLink data ECC Error Counter for Link 0 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L1 149 //!< NVLink data ECC Error Counter for Link 1 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L2 150 //!< NVLink data ECC Error Counter for Link 2 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L3 151 //!< NVLink data ECC Error Counter for Link 3 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L4 152 //!< NVLink data ECC Error Counter for Link 4 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L5 153 //!< NVLink data ECC Error Counter for Link 5 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L6 154 //!< NVLink data ECC Error Counter for Link 6 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L7 155 //!< NVLink data ECC Error Counter for Link 7 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L8 156 //!< NVLink data ECC Error Counter for Link 8 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L9 157 //!< NVLink data ECC Error Counter for Link 9 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L10 158 //!< NVLink data ECC Error Counter for Link 10 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_L11 159 //!< NVLink data ECC Error Counter for Link 11 +#define NVML_FI_DEV_NVLINK_ECC_DATA_ERROR_COUNT_TOTAL 160 //!< NvLink data ECC Error Counter total for all Links + +#define NVML_FI_DEV_NVLINK_ERROR_DL_REPLAY 161 +#define NVML_FI_DEV_NVLINK_ERROR_DL_RECOVERY 162 +#define NVML_FI_DEV_NVLINK_ERROR_DL_CRC 163 +#define NVML_FI_DEV_NVLINK_GET_SPEED 164 +#define NVML_FI_DEV_NVLINK_GET_STATE 165 +#define NVML_FI_DEV_NVLINK_GET_VERSION 166 + +#define NVML_FI_DEV_NVLINK_GET_POWER_STATE 167 +#define NVML_FI_DEV_NVLINK_GET_POWER_THRESHOLD 168 + +#define NVML_FI_DEV_PCIE_L0_TO_RECOVERY_COUNTER 169 + +#define NVML_FI_MAX 170 //!< One greater than the largest field ID defined above /** * Information for a Field Value Sample @@ -1254,7 +1791,10 @@ typedef struct nvmlFieldValue_st */ /***************************************************************************************************/ -typedef struct nvmlUnit_st* nvmlUnit_t; +typedef struct +{ + struct nvmlUnit_st* handle; +} nvmlUnit_t; /** * Description of HWBC entry @@ -1356,7 +1896,10 @@ typedef struct nvmlUnitFanSpeeds_st /** * Handle to an event set */ -typedef struct nvmlEventSet_st* nvmlEventSet_t; +typedef struct +{ + struct nvmlEventSet_st* handle; +} nvmlEventSet_t; /** @defgroup nvmlEventType Event Types * @{ @@ -1608,7 +2151,7 @@ typedef struct nvmlAccountingStats_st { typedef enum nvmlEncoderQueryType_enum { NVML_ENCODER_QUERY_H264 = 0, //!< H264 encoder - NVML_ENCODER_QUERY_HEVC = 1, //!< HEVC encoder + 
NVML_ENCODER_QUERY_HEVC = 1 //!< HEVC encoder }nvmlEncoderType_t; /** @@ -1643,7 +2186,7 @@ typedef enum nvmlFBCSessionType_enum NVML_FBC_SESSION_TYPE_TOSYS, //!< ToSys NVML_FBC_SESSION_TYPE_CUDA, //!< Cuda NVML_FBC_SESSION_TYPE_VID, //!< Vid - NVML_FBC_SESSION_TYPE_HWENC, //!< HEnc + NVML_FBC_SESSION_TYPE_HWENC //!< HEnc } nvmlFBCSessionType_t; /** @@ -1695,7 +2238,7 @@ typedef struct nvmlFBCSessionInfo_st typedef enum nvmlDetachGpuState_enum { NVML_DETACH_GPU_KEEP = 0, - NVML_DETACH_GPU_REMOVE, + NVML_DETACH_GPU_REMOVE } nvmlDetachGpuState_t; /** @@ -1704,11 +2247,28 @@ typedef enum nvmlDetachGpuState_enum typedef enum nvmlPcieLinkState_enum { NVML_PCIE_LINK_KEEP = 0, - NVML_PCIE_LINK_SHUT_DOWN, + NVML_PCIE_LINK_SHUT_DOWN } nvmlPcieLinkState_t; /** @} */ +#define NVML_GPU_FABRIC_UUID_LEN 16 + +#define NVML_GPU_FABRIC_STATE_NOT_SUPPORTED 0 +#define NVML_GPU_FABRIC_STATE_NOT_STARTED 1 +#define NVML_GPU_FABRIC_STATE_IN_PROGRESS 2 +#define NVML_GPU_FABRIC_STATE_COMPLETED 3 + +typedef unsigned char nvmlGpuFabricState_t; + +typedef struct { + char clusterUuid[NVML_GPU_FABRIC_UUID_LEN]; //!< Uuid of the cluster to which this GPU belongs + nvmlReturn_t status; //!< Error status, if any. Must be checked only if state returns "complete". + unsigned int partitionId; //!< ID of the fabric partition to which this GPU belongs + nvmlGpuFabricState_t state; //!< Current state of GPU registration process +} nvmlGpuFabricInfo_t; +/** @} */ + /***************************************************************************************************/ /** @defgroup nvmlInitializationAndCleanup Initialization and Cleanup * This chapter describes the methods that handle NVML initialization and cleanup. @@ -2193,7 +2753,7 @@ nvmlReturn_t DECLDIR nvmlDeviceGetCount_v2(unsigned int *deviceCount); * * @note This API currently only supports MIG device handles. * - * For newer than Volta &tm; fully supported devices. + * For Ampere &tm; or newer fully supported devices. * Supported on Linux only. * * @param device NVML device handle @@ -2295,14 +2855,12 @@ nvmlReturn_t DECLDIR nvmlDeviceGetHandleBySerial(const char *serial, nvmlDevice_ * * For all products. * - * @param uuid The UUID of the target GPU - * @param device Reference in which to return the device handle + * @param uuid The UUID of the target GPU or MIG instance + * @param device Reference in which to return the device handle or MIG device handle * * Starting from NVML 5, this API causes NVML to initialize the target GPU * NVML may initialize additional GPUs as it searches for the target GPU * - * This API does not currently support acquiring MIG device handles using MIG device UUIDs. - * * @return * - \ref NVML_SUCCESS if \a device has been set * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized @@ -2964,6 +3522,24 @@ nvmlReturn_t DECLDIR nvmlDeviceGetPciInfo_v3(nvmlDevice_t device, nvmlPciInfo_t */ nvmlReturn_t DECLDIR nvmlDeviceGetMaxPcieLinkGeneration(nvmlDevice_t device, unsigned int *maxLinkGen); +/** + * Retrieves the maximum PCIe link generation supported by this device + * + * For Fermi &tm; or newer fully supported devices. 
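// Illustrative sketch (not part of the vendored header): per the updated
// nvmlDeviceGetHandleByUUID documentation above, the UUID lookup now accepts MIG
// instance UUIDs as well as GPU UUIDs, so one code path can resolve either
// identifier. The lookupByUUID name is an assumption for this sketch.
package gpuinfo

import (
	"fmt"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
)

// lookupByUUID resolves a GPU UUID ("GPU-...") or a MIG instance UUID ("MIG-...")
// to a device handle.
func lookupByUUID(uuid string) (nvml.Device, error) {
	device, ret := nvml.DeviceGetHandleByUUID(uuid)
	if ret != nvml.SUCCESS {
		return device, fmt.Errorf("lookup %q: %s", uuid, nvml.ErrorString(ret))
	}
	return device, nil
}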
+ * + * @param device The identifier of the target device + * @param maxLinkGenDevice Reference in which to return the max PCIe link generation + * + * @return + * - \ref NVML_SUCCESS if \a maxLinkGenDevice has been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a maxLinkGenDevice is null + * - \ref NVML_ERROR_NOT_SUPPORTED if PCIe link information is not available + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGpuMaxPcieLinkGeneration(nvmlDevice_t device, unsigned int *maxLinkGenDevice); + /** * Retrieves the maximum PCIe link width possible with this device and system * @@ -3407,6 +3983,133 @@ nvmlReturn_t DECLDIR nvmlDeviceGetFanSpeed(nvmlDevice_t device, unsigned int *sp */ nvmlReturn_t DECLDIR nvmlDeviceGetFanSpeed_v2(nvmlDevice_t device, unsigned int fan, unsigned int * speed); +/** + * Retrieves the intended target speed of the device's specified fan. + * + * Normally, the driver dynamically adjusts the fan based on + * the needs of the GPU. But when user set fan speed using nvmlDeviceSetFanSpeed_v2, + * the driver will attempt to make the fan achieve the setting in + * nvmlDeviceSetFanSpeed_v2. The actual current speed of the fan + * is reported in nvmlDeviceGetFanSpeed_v2. + * + * For all discrete products with dedicated fans. + * + * The fan speed is expressed as a percentage of the product's maximum noise tolerance fan speed. + * This value may exceed 100% in certain cases. + * + * @param device The identifier of the target device + * @param fan The index of the target fan, zero indexed. + * @param targetSpeed Reference in which to return the fan speed percentage + * + * @return + * - \ref NVML_SUCCESS if \a speed has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a fan is not an acceptable index, or \a speed is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a fan or is newer than Maxwell + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetTargetFanSpeed(nvmlDevice_t device, unsigned int fan, unsigned int *targetSpeed); + +/** + * Sets the speed of the fan control policy to default. + * + * For all cuda-capable discrete products with fans + * + * @param device The identifier of the target device + * @param fan The index of the fan, starting at zero + * + * return + * NVML_SUCCESS if speed has been adjusted + * NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * NVML_ERROR_INVALID_ARGUMENT if device is invalid + * NVML_ERROR_NOT_SUPPORTED if the device does not support this + * (doesn't have fans) + * NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetDefaultFanSpeed_v2(nvmlDevice_t device, unsigned int fan); + +/** + * Retrieves the min and max fan speed that user can set for the GPU fan. 
+ * + * For all cuda-capable discrete products with fans + * + * @param device The identifier of the target device + * @param minSpeed The minimum speed allowed to set + * @param maxSpeed The maximum speed allowed to set + * + * return + * NVML_SUCCESS if speed has been adjusted + * NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * NVML_ERROR_INVALID_ARGUMENT if device is invalid + * NVML_ERROR_NOT_SUPPORTED if the device does not support this + * (doesn't have fans) + * NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMinMaxFanSpeed(nvmlDevice_t device, unsigned int * minSpeed, + unsigned int * maxSpeed); + +/** + * Gets current fan control policy. + * + * For Maxwell &tm; or newer fully supported devices. + * + * For all cuda-capable discrete products with fans + * + * device The identifier of the target \a device + * policy Reference in which to return the fan control \a policy + * + * return + * NVML_SUCCESS if \a policy has been populated + * NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a policy is null or the \a fan given doesn't reference + * a fan that exists. + * NVML_ERROR_NOT_SUPPORTED if the \a device is older than Maxwell + * NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetFanControlPolicy_v2(nvmlDevice_t device, unsigned int fan, + nvmlFanControlPolicy_t *policy); + +/** + * Sets current fan control policy. + * + * For Maxwell &tm; or newer fully supported devices. + * + * Requires privileged user. + * + * For all cuda-capable discrete products with fans + * + * device The identifier of the target \a device + * policy The fan control \a policy to set + * + * return + * NVML_SUCCESS if \a policy has been set + * NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a policy is null or the \a fan given doesn't reference + * a fan that exists. + * NVML_ERROR_NOT_SUPPORTED if the \a device is older than Maxwell + * NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetFanControlPolicy(nvmlDevice_t device, unsigned int fan, + nvmlFanControlPolicy_t policy); + +/** + * Retrieves the number of fans on the device. + * + * For all discrete products with dedicated fans. + * + * @param device The identifier of the target device + * @param numFans The number of fans + * + * @return + * - \ref NVML_SUCCESS if \a fan number query was successful + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a numFans is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a fan + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNumFans(nvmlDevice_t device, unsigned int *numFans); /** * Retrieves the current temperature readings for the device, in degrees C. @@ -3450,9 +4153,46 @@ nvmlReturn_t DECLDIR nvmlDeviceGetTemperature(nvmlDevice_t device, nvmlTemperatu nvmlReturn_t DECLDIR nvmlDeviceGetTemperatureThreshold(nvmlDevice_t device, nvmlTemperatureThresholds_t thresholdType, unsigned int *temp); /** - * Retrieves the current performance state for the device. 
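// Illustrative sketch (not part of the vendored header): enumerating fans and
// their current speeds via the per-fan queries documented above. It assumes the
// generated accessors Device.GetNumFans and Device.GetFanSpeed_v2 are present in
// the vendored go-nvml version; the reportFans name is likewise an assumption.
// Reported speed is a percentage of the maximum noise-tolerant speed and may
// exceed 100%.
package gpuinfo

import (
	"fmt"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
)

func reportFans(device nvml.Device) {
	numFans, ret := device.GetNumFans()
	if ret != nvml.SUCCESS {
		fmt.Printf("fan query not supported: %s\n", nvml.ErrorString(ret))
		return
	}
	for fan := 0; fan < numFans; fan++ {
		if speed, ret := device.GetFanSpeed_v2(fan); ret == nvml.SUCCESS {
			fmt.Printf("fan %d: %d%%\n", fan, speed)
		}
	}
}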
+ * Sets the temperature threshold for the GPU with the specified threshold type in degrees C. * - * For Fermi &tm; or newer fully supported devices. + * For Maxwell &tm; or newer fully supported devices. + * + * See \ref nvmlTemperatureThresholds_t for details on available temperature thresholds. + * + * @param device The identifier of the target device + * @param thresholdType The type of threshold value to be set + * @param temp Reference which hold the value to be set + * @return + * - \ref NVML_SUCCESS if \a temp has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a thresholdType is invalid or \a temp is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not have a temperature sensor or is unsupported + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetTemperatureThreshold(nvmlDevice_t device, nvmlTemperatureThresholds_t thresholdType, int *temp); + +/** + * Used to execute a list of thermal system instructions. + * + * @param device The identifier of the target device + * @param sensorIndex The index of the thermal sensor + * @param pThermalSettings Reference in which to return the thermal sensor information + * + * @return + * - \ref NVML_SUCCESS if \a pThermalSettings has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pThermalSettings is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetThermalSettings(nvmlDevice_t device, unsigned int sensorIndex, nvmlGpuThermalSettings_t *pThermalSettings); + +/** + * Retrieves the current performance state for the device. + * + * For Fermi &tm; or newer fully supported devices. * * See \ref nvmlPstates_t for details on allowed performance states. * @@ -3719,7 +4459,8 @@ nvmlReturn_t DECLDIR nvmlDeviceGetEnforcedPowerLimit(nvmlDevice_t device, unsign nvmlReturn_t DECLDIR nvmlDeviceGetGpuOperationMode(nvmlDevice_t device, nvmlGpuOperationMode_t *current, nvmlGpuOperationMode_t *pending); /** - * Retrieves the amount of used, free and total memory available on the device, in bytes. + * Retrieves the amount of used, free, reserved and total memory available on the device, in bytes. + * The reserved amount is supported on version 2 only. * * For all products. * @@ -3729,12 +4470,14 @@ nvmlReturn_t DECLDIR nvmlDeviceGetGpuOperationMode(nvmlDevice_t device, nvmlGpuO * Under Linux and Windows TCC, the reported amount of used memory is equal to the sum of memory allocated * by all active channels on the device. * - * See \ref nvmlMemory_t for details on available memory info. + * See \ref nvmlMemory_v2_t for details on available memory info. * * @note In MIG mode, if device handle is provided, the API returns aggregate * information, only if the caller has appropriate privileges. Per-instance * information can be queried by using specific MIG device handles. * + * @note nvmlDeviceGetMemoryInfo_v2 adds additional memory information. 
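// Illustrative sketch (not part of the vendored header): querying memory with the
// v2 structure noted above, falling back to the v1 call when the installed driver
// does not expose nvmlDeviceGetMemoryInfo_v2. It assumes the Device.GetMemoryInfo_v2
// accessor is available in the vendored go-nvml version; the reportMemory name and
// the exact fallback condition are assumptions for this sketch.
package gpuinfo

import (
	"fmt"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
)

func reportMemory(device nvml.Device) {
	if mem2, ret := device.GetMemoryInfo_v2(); ret == nvml.SUCCESS {
		fmt.Printf("total=%d reserved=%d used=%d free=%d (bytes)\n",
			mem2.Total, mem2.Reserved, mem2.Used, mem2.Free)
		return
	}
	// Older drivers: fall back to the v1 query, which has no reserved field.
	if mem, ret := device.GetMemoryInfo(); ret == nvml.SUCCESS {
		fmt.Printf("total=%d used=%d free=%d (bytes)\n", mem.Total, mem.Used, mem.Free)
	}
}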
+ * * @param device The identifier of the target device * @param memory Reference in which to return the memory information * @@ -3747,6 +4490,7 @@ nvmlReturn_t DECLDIR nvmlDeviceGetGpuOperationMode(nvmlDevice_t device, nvmlGpuO * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetMemoryInfo(nvmlDevice_t device, nvmlMemory_t *memory); +nvmlReturn_t DECLDIR nvmlDeviceGetMemoryInfo_v2(nvmlDevice_t device, nvmlMemory_v2_t *memory); /** * Retrieves the current compute mode for the device. @@ -3822,6 +4566,30 @@ nvmlReturn_t DECLDIR nvmlDeviceGetCudaComputeCapability(nvmlDevice_t device, int */ nvmlReturn_t DECLDIR nvmlDeviceGetEccMode(nvmlDevice_t device, nvmlEnableState_t *current, nvmlEnableState_t *pending); +/** + * Retrieves the default ECC modes for the device. + * + * For Fermi &tm; or newer fully supported devices. + * Only applicable to devices with ECC. + * Requires \a NVML_INFOROM_ECC version 1.0 or higher. + * + * See \ref nvmlEnableState_t for details on allowed modes. + * + * @param device The identifier of the target device + * @param defaultMode Reference in which to return the default ECC mode + * + * @return + * - \ref NVML_SUCCESS if \a current and \a pending have been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a default is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceSetEccMode() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetDefaultEccMode(nvmlDevice_t device, nvmlEnableState_t *defaultMode); + /** * Retrieves the device boardId from 0-N. * Devices with the same boardId indicate GPUs connected to the same PLX. Use in conjunction with @@ -4086,6 +4854,7 @@ nvmlReturn_t DECLDIR nvmlDeviceGetEncoderStats (nvmlDevice_t device, unsigned in * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a sessionCount is too small, array element count is returned in \a sessionCount * - \ref NVML_ERROR_INVALID_ARGUMENT if \a sessionCount is NULL. * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by \a device * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetEncoderSessions(nvmlDevice_t device, unsigned int *sessionCount, nvmlEncoderSessionInfo_t *sessionInfos); @@ -4248,6 +5017,7 @@ nvmlReturn_t DECLDIR nvmlDeviceGetBridgeChipInfo(nvmlDevice_t device, nvmlBridge * @note In MIG mode, if device handle is provided, the API returns aggregate information, only if * the caller has appropriate privileges. Per-instance information can be queried by using * specific MIG device handles. + * Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode. 
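+ *
+ * @note A minimal sketch of the query-size-then-fetch pattern, assuming \a device is a valid
+ *       handle and the library has been initialized; names are placeholders and error handling
+ *       (including retrying if the process list grows between the two calls) is abbreviated:
+ * \code
+ * unsigned int count = 0;
+ * nvmlReturn_t ret = nvmlDeviceGetComputeRunningProcesses_v3(device, &count, NULL);
+ * if (ret == NVML_ERROR_INSUFFICIENT_SIZE && count > 0)
+ * {
+ *     nvmlProcessInfo_t *infos = malloc(count * sizeof(*infos));
+ *     ret = nvmlDeviceGetComputeRunningProcesses_v3(device, &count, infos);
+ *     // On NVML_SUCCESS, infos[0..count-1] hold the pid and usedGpuMemory of each process.
+ *     free(infos);
+ * }
+ * \endcode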
* * @param device The device handle or MIG device handle * @param infoCount Reference in which to provide the \a infos array size, and @@ -4263,11 +5033,12 @@ nvmlReturn_t DECLDIR nvmlDeviceGetBridgeChipInfo(nvmlDevice_t device, nvmlBridge * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, either of \a infoCount or \a infos is NULL * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by \a device * - \ref NVML_ERROR_UNKNOWN on any unexpected error * * @see \ref nvmlSystemGetProcessName */ -nvmlReturn_t DECLDIR nvmlDeviceGetComputeRunningProcesses(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_t *infos); +nvmlReturn_t DECLDIR nvmlDeviceGetComputeRunningProcesses_v3(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_t *infos); /** * Get information about processes with a graphics context on a device @@ -4289,8 +5060,9 @@ nvmlReturn_t DECLDIR nvmlDeviceGetComputeRunningProcesses(nvmlDevice_t device, u * @note In MIG mode, if device handle is provided, the API returns aggregate information, only if * the caller has appropriate privileges. Per-instance information can be queried by using * specific MIG device handles. + * Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode. * - * @param device The identifier of the target device + * @param device The device handle or MIG device handle * @param infoCount Reference in which to provide the \a infos array size, and * to return the number of returned elements * @param infos Reference in which to return the process information @@ -4304,11 +5076,55 @@ nvmlReturn_t DECLDIR nvmlDeviceGetComputeRunningProcesses(nvmlDevice_t device, u * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, either of \a infoCount or \a infos is NULL * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by \a device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see \ref nvmlSystemGetProcessName + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGraphicsRunningProcesses_v3(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_t *infos); + +/** + * Get information about processes with a MPS compute context on a device + * + * For Volta &tm; or newer fully supported devices. + * + * This function returns information only about compute running processes (e.g. CUDA application which have + * active context) utilizing MPS. Any graphics applications (e.g. using OpenGL, DirectX) won't be listed by + * this function. + * + * To query the current number of running compute processes, call this function with *infoCount = 0. The + * return code will be NVML_ERROR_INSUFFICIENT_SIZE, or NVML_SUCCESS if none are running. For this call + * \a infos is allowed to be NULL. + * + * The usedGpuMemory field returned is all of the memory used by the application. + * + * Keep in mind that information returned by this call is dynamic and the number of elements might change in + * time. Allocate more space for \a infos table in case new compute processes are spawned. 
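+ *
+ * @note For example, to query only the number of running MPS compute processes (a sketch;
+ *       \a device is assumed to be a valid handle):
+ * \code
+ * unsigned int count = 0;
+ * nvmlReturn_t ret = nvmlDeviceGetMPSComputeRunningProcesses_v3(device, &count, NULL);
+ * // NVML_SUCCESS                  -> no MPS compute processes are running
+ * // NVML_ERROR_INSUFFICIENT_SIZE  -> count now holds the number of running processes
+ * \endcode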
+ * + * @note In MIG mode, if device handle is provided, the API returns aggregate information, only if + * the caller has appropriate privileges. Per-instance information can be queried by using + * specific MIG device handles. + * Querying per-instance information using MIG device handles is not supported if the device is in vGPU Host virtualization mode. + * + * @param device The device handle or MIG device handle + * @param infoCount Reference in which to provide the \a infos array size, and + * to return the number of returned elements + * @param infos Reference in which to return the process information + * + * @return + * - \ref NVML_SUCCESS if \a infoCount and \a infos have been populated + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a infoCount indicates that the \a infos array is too small + * \a infoCount will contain minimal amount of space necessary for + * the call to complete + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, either of \a infoCount or \a infos is NULL + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by \a device * - \ref NVML_ERROR_UNKNOWN on any unexpected error * * @see \ref nvmlSystemGetProcessName */ -nvmlReturn_t DECLDIR nvmlDeviceGetGraphicsRunningProcesses(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_t *infos); +nvmlReturn_t DECLDIR nvmlDeviceGetMPSComputeRunningProcesses_v3(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_t *infos); /** * Check if the GPU devices are on the same physical board. @@ -4407,6 +5223,10 @@ nvmlReturn_t DECLDIR nvmlDeviceGetSamples(nvmlDevice_t device, nvmlSamplingType_ * BAR1 is used to map the FB (device memory) so that it can be directly accessed by the CPU or by 3rd party * devices (peer-to-peer on the PCIE bus). * + * @note In MIG mode, if device handle is provided, the API returns aggregate + * information, only if the caller has appropriate privileges. Per-instance + * information can be queried by using specific MIG device handles. + * * For Kepler &tm; or newer fully supported devices. * * @param device The identifier of the target device @@ -4424,7 +5244,6 @@ nvmlReturn_t DECLDIR nvmlDeviceGetSamples(nvmlDevice_t device, nvmlSamplingType_ */ nvmlReturn_t DECLDIR nvmlDeviceGetBAR1MemoryInfo(nvmlDevice_t device, nvmlBAR1Memory_t *bar1Memory); - /** * Gets the duration of time during which the device was throttled (lower than requested clocks) due to power * or thermal constraints. 
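+ *
+ * @note A minimal sketch for the power policy, assuming \a device is a valid handle (see
+ *       \ref nvmlPerfPolicyType_t for the other policies; names are placeholders):
+ * \code
+ * nvmlViolationTime_t viol;
+ * if (nvmlDeviceGetViolationStatus(device, NVML_PERF_POLICY_POWER, &viol) == NVML_SUCCESS)
+ * {
+ *     // viol.violationTime is the accumulated throttling time measured from viol.referenceTime.
+ * }
+ * \endcode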
@@ -4451,6 +5270,117 @@ nvmlReturn_t DECLDIR nvmlDeviceGetBAR1MemoryInfo(nvmlDevice_t device, nvmlBAR1Me */ nvmlReturn_t DECLDIR nvmlDeviceGetViolationStatus(nvmlDevice_t device, nvmlPerfPolicyType_t perfPolicyType, nvmlViolationTime_t *violTime); +/** + * Gets the device's interrupt number + * + * @param device The identifier of the target device + * @param irqNum The interrupt number associated with the specified device + * + * @return + * - \ref NVML_SUCCESS if irq number is successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a irqNum is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetIrqNum(nvmlDevice_t device, unsigned int *irqNum); + +/** + * Gets the device's core count + * + * @param device The identifier of the target device + * @param numCores The number of cores for the specified device + * + * @return + * - \ref NVML_SUCCESS if Gpu core count is successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a numCores is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetNumGpuCores(nvmlDevice_t device, unsigned int *numCores); + +/** + * Gets the devices power source + * + * @param device The identifier of the target device + * @param powerSource The power source of the device + * + * @return + * - \ref NVML_SUCCESS if the current power source was successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a powerSource is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPowerSource(nvmlDevice_t device, nvmlPowerSource_t *powerSource); + +/** + * Gets the device's memory bus width + * + * @param device The identifier of the target device + * @param busWidth The devices's memory bus width + * + * @return + * - \ref NVML_SUCCESS if the memory bus width is successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a busWidth is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMemoryBusWidth(nvmlDevice_t device, unsigned int *busWidth); + +/** + * Gets the device's PCIE Max Link speed in MBPS + * + * @param device The identifier of the target device + * @param maxSpeed The devices's PCIE Max Link speed in MBPS + * + * @return + * - \ref NVML_SUCCESS if Pcie Max Link Speed is successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a maxSpeed is NULL + * - 
\ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPcieLinkMaxSpeed(nvmlDevice_t device, unsigned int *maxSpeed); + +/** + * Gets the device's PCIe Link speed in Mbps + * + * @param device The identifier of the target device + * @param pcieSpeed The devices's PCIe Max Link speed in Mbps + * + * @return + * - \ref NVML_SUCCESS if \a pcieSpeed has been retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a pcieSpeed is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support PCIe speed getting + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetPcieSpeed(nvmlDevice_t device, unsigned int *pcieSpeed); + +/** + * Gets the device's Adaptive Clock status + * + * @param device The identifier of the target device + * @param adaptiveClockStatus The current adaptive clocking status + * + * @return + * - \ref NVML_SUCCESS if the current adaptive clocking status is successfully retrieved + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a adaptiveClockStatus is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * + */ +nvmlReturn_t DECLDIR nvmlDeviceGetAdaptiveClockInfoStatus(nvmlDevice_t device, unsigned int *adaptiveClockStatus); + /** * @} */ @@ -4669,7 +5599,7 @@ nvmlReturn_t DECLDIR nvmlDeviceGetRetiredPagesPendingStatus(nvmlDevice_t device, * @note On MIG-enabled GPUs with active instances, querying the number of * remapped rows is not supported * - * For newer than Volta &tm; fully supported devices. + * For Ampere &tm; or newer fully supported devices. * * @param device The identifier of the target device * @param corrRows Reference for number of rows remapped due to correctable errors @@ -5013,6 +5943,60 @@ nvmlReturn_t DECLDIR nvmlDeviceSetGpuLockedClocks(nvmlDevice_t device, unsigned */ nvmlReturn_t DECLDIR nvmlDeviceResetGpuLockedClocks(nvmlDevice_t device); +/** + * Set memory clocks that device will lock to. + * + * Sets the device's memory clocks to the value in the range of minMemClockMHz to maxMemClockMHz. + * Setting this will supersede application clock values and take effect regardless of whether a cuda app is running. + * See /ref nvmlDeviceSetApplicationsClocks + * + * Can be used as a setting to request constant performance. + * + * Requires root/admin permissions. + * + * After system reboot or driver reload applications clocks go back to their default value. + * See \ref nvmlDeviceResetMemoryLockedClocks. + * + * For Ampere &tm; or newer fully supported devices. 
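+ *
+ * @note A minimal sketch (requires root/admin; \a device is assumed to be a valid handle and the
+ *       clock value below is a placeholder that must be one of the device's supported memory clocks):
+ * \code
+ * // Pin the memory clock to a single supported value, then restore the default afterwards.
+ * nvmlReturn_t ret = nvmlDeviceSetMemoryLockedClocks(device, 5001, 5001);
+ * // ... run the workload ...
+ * if (ret == NVML_SUCCESS)
+ *     nvmlDeviceResetMemoryLockedClocks(device);
+ * \endcode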
+ * + * @param device The identifier of the target device + * @param minMemClockMHz Requested minimum memory clock in MHz + * @param maxMemClockMHz Requested maximum memory clock in MHz + * + * @return + * - \ref NVML_SUCCESS if new settings were successfully set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a minGpuClockMHz and \a maxGpuClockMHz + * is not a valid clock combination + * - \ref NVML_ERROR_NO_PERMISSION if the user doesn't have permission to perform this operation + * - \ref NVML_ERROR_NOT_SUPPORTED if the device doesn't support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetMemoryLockedClocks(nvmlDevice_t device, unsigned int minMemClockMHz, unsigned int maxMemClockMHz); + +/** + * Resets the memory clock to the default value + * + * This is the memory clock that will be used after system reboot or driver reload. + * Default values are idle clocks, but the current values can be changed using \ref nvmlDeviceSetApplicationsClocks. + * + * @see nvmlDeviceSetMemoryLockedClocks + * + * For Ampere &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * + * @return + * - \ref NVML_SUCCESS if new settings were successfully set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceResetMemoryLockedClocks(nvmlDevice_t device); + /** * Set clocks that applications will lock to. * @@ -5054,6 +6038,29 @@ nvmlReturn_t DECLDIR nvmlDeviceResetGpuLockedClocks(nvmlDevice_t device); */ nvmlReturn_t DECLDIR nvmlDeviceSetApplicationsClocks(nvmlDevice_t device, unsigned int memClockMHz, unsigned int graphicsClockMHz); +/** + * Retrieves the frequency monitor fault status for the device. + * + * For Ampere &tm; or newer fully supported devices. + * Requires root user. + * + * See \ref nvmlClkMonStatus_t for details on decoding the status output. + * + * @param device The identifier of the target device + * @param status Reference in which to return the clkmon fault status + * + * @return + * - \ref NVML_SUCCESS if \a status has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a status is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + * + * @see nvmlDeviceGetClkMonStatus() + */ +nvmlReturn_t DECLDIR nvmlDeviceGetClkMonStatus(nvmlDevice_t device, nvmlClkMonStatus_t *status); + /** * Set new power limit of this device. 
* @@ -5448,6 +6455,25 @@ nvmlReturn_t DECLDIR nvmlDeviceFreezeNvLinkUtilizationCounter (nvmlDevice_t devi */ nvmlReturn_t DECLDIR nvmlDeviceResetNvLinkUtilizationCounter (nvmlDevice_t device, unsigned int link, unsigned int counter); +/** +* Get the NVLink device type of the remote device connected over the given link. +* +* @param device The device handle of the target GPU +* @param link The NVLink link index on the target GPU +* @param pNvLinkDeviceType Pointer in which the output remote device type is returned +* +* @return +* - \ref NVML_SUCCESS if \a pNvLinkDeviceType has been set +* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized +* - \ref NVML_ERROR_NOT_SUPPORTED if NVLink is not supported +* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device or \a link is invalid, or +* \a pNvLinkDeviceType is NULL +* - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is +* otherwise inaccessible +* - \ref NVML_ERROR_UNKNOWN on any unexpected error +*/ +nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkRemoteDeviceType(nvmlDevice_t device, unsigned int link, nvmlIntNvLinkDeviceType_t *pNvLinkDeviceType); + /** @} */ /***************************************************************************************************/ @@ -5733,19 +6759,34 @@ nvmlReturn_t DECLDIR nvmlDeviceDiscoverGpus (nvmlPciInfo_t *pciInfo); */ nvmlReturn_t DECLDIR nvmlDeviceGetFieldValues(nvmlDevice_t device, int valuesCount, nvmlFieldValue_t *values); +/** + * Clear values for a list of fields for a device. This API allows multiple fields to be cleared at once. + * + * @param device The device handle of the GPU to request field values for + * @param valuesCount Number of entries in values that should be cleared + * @param values Array of \a valuesCount structures to hold field values. + * Each value's fieldId must be populated prior to this call + * + * @return + * - \ref NVML_SUCCESS if any values in \a values were cleared. Note that you must + * check the nvmlReturn field of each value for each individual + * status + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a values is NULL + */ +nvmlReturn_t DECLDIR nvmlDeviceClearFieldValues(nvmlDevice_t device, int valuesCount, nvmlFieldValue_t *values); /** @} */ /***************************************************************************************************/ -/** @defgroup gridVirtual GRID Virtualization Enums, Constants and Structs +/** @defgroup vGPU Enums, Constants and Structs * @{ */ /** @} */ /***************************************************************************************************/ /***************************************************************************************************/ -/** @defgroup nvmlGridQueries GRID Virtualization APIs - * This chapter describes operations that are associated with NVIDIA GRID products. +/** @defgroup nvmlVirtualGpuQueries vGPU APIs + * This chapter describes operations that are associated with NVIDIA vGPU Software products. * @{ */ /***************************************************************************************************/ @@ -5804,20 +6845,20 @@ nvmlReturn_t DECLDIR nvmlDeviceGetHostVgpuMode(nvmlDevice_t device, nvmlHostVgpu nvmlReturn_t DECLDIR nvmlDeviceSetVirtualizationMode(nvmlDevice_t device, nvmlGpuVirtualizationMode_t virtualMode); /** - * Retrieve the GRID licensable features. + * Retrieve the vGPU Software licensable features. * - * Identifies whether the system supports GRID Software Licensing. 
If it does, return the list of licensable feature(s) + * Identifies whether the system supports vGPU Software Licensing. If it does, return the list of licensable feature(s) * and their current license status. * * @param device Identifier of the target device - * @param pGridLicensableFeatures Pointer to structure in which GRID licensable features are returned + * @param pGridLicensableFeatures Pointer to structure in which vGPU software licensable features are returned * * @return * - \ref NVML_SUCCESS if licensable features are successfully retrieved * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pGridLicensableFeatures is NULL * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ -nvmlReturn_t DECLDIR nvmlDeviceGetGridLicensableFeatures_v3(nvmlDevice_t device, nvmlGridLicensableFeatures_t *pGridLicensableFeatures); +nvmlReturn_t DECLDIR nvmlDeviceGetGridLicensableFeatures_v4(nvmlDevice_t device, nvmlGridLicensableFeatures_t *pGridLicensableFeatures); /** * Retrieves the current utilization and process ID @@ -5855,21 +6896,99 @@ nvmlReturn_t DECLDIR nvmlDeviceGetGridLicensableFeatures_v3(nvmlDevice_t device, * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, \a utilization is NULL, or \a samplingPeriodUs is NULL * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_NOT_FOUND if sample entries are not found * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlDeviceGetProcessUtilization(nvmlDevice_t device, nvmlProcessUtilizationSample_t *utilization, unsigned int *processSamplesCount, unsigned long long lastSeenTimeStamp); -/** @} */ - -/***************************************************************************************************/ -/** @defgroup nvmlVgpu GRID vGPU Management - * @{ +/** + * Retrieve GSP firmware version. + * + * The caller passes in buffer via \a version and corresponding GSP firmware numbered version + * is returned with the same parameter in string format. + * + * @param device Device handle + * @param version The retrieved GSP firmware version * - * This chapter describes APIs supporting NVIDIA GRID vGPU. + * @return + * - \ref NVML_SUCCESS if GSP firmware version is sucessfully retrieved + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or GSP \a version pointer is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if GSP firmware is not enabled for GPU + * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ -/***************************************************************************************************/ +nvmlReturn_t DECLDIR nvmlDeviceGetGspFirmwareVersion(nvmlDevice_t device, char *version); + +/** + * Retrieve GSP firmware mode. + * + * The caller passes in integer pointers. GSP firmware enablement and default mode information is returned with + * corresponding parameters. The return value in \a isEnabled and \a defaultMode should be treated as boolean. 
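+ *
+ * @note A minimal sketch, assuming \a device is a valid handle (variable names are placeholders):
+ * \code
+ * unsigned int gspEnabled = 0, gspDefault = 0;
+ * if (nvmlDeviceGetGspFirmwareMode(device, &gspEnabled, &gspDefault) == NVML_SUCCESS)
+ * {
+ *     // A non-zero gspEnabled means the GPU is currently running GSP firmware;
+ *     // a non-zero gspDefault means GSP firmware is the default for this device.
+ * }
+ * \endcode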
+ * + * @param device Device handle + * @param isEnabled Pointer to specify if GSP firmware is enabled + * @param defaultMode Pointer to specify if GSP firmware is supported by default on \a device + * + * @return + * - \ref NVML_SUCCESS if GSP firmware mode is sucessfully retrieved + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or any of \a isEnabled or \a defaultMode is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGspFirmwareMode(nvmlDevice_t device, unsigned int *isEnabled, unsigned int *defaultMode); + +/** @} */ + +/***************************************************************************************************/ +/** @defgroup nvmlVgpu vGPU Management + * @{ + * + * This chapter describes APIs supporting NVIDIA vGPU. + */ +/***************************************************************************************************/ + +/** + * Retrieve the requested vGPU driver capability. + * + * Refer to the \a nvmlVgpuDriverCapability_t structure for the specific capabilities that can be queried. + * The return value in \a capResult should be treated as a boolean, with a non-zero value indicating that the capability + * is supported. + * + * For Maxwell &tm; or newer fully supported devices. + * + * @param capability Specifies the \a nvmlVgpuDriverCapability_t to be queried + * @param capResult A boolean for the queried capability indicating that feature is supported + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a capability is invalid, or \a capResult is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED the API is not supported in current state or \a devices not in vGPU mode + * - \ref NVML_ERROR_UNKNOWN on any unexpected error +*/ +nvmlReturn_t DECLDIR nvmlGetVgpuDriverCapabilities(nvmlVgpuDriverCapability_t capability, unsigned int *capResult); + +/** + * Retrieve the requested vGPU capability for GPU. + * + * Refer to the \a nvmlDeviceVgpuCapability_t structure for the specific capabilities that can be queried. + * The return value in \a capResult should be treated as a boolean, with a non-zero value indicating that the capability + * is supported. + * + * For Maxwell &tm; or newer fully supported devices. + * + * @param device The identifier of the target device + * @param capability Specifies the \a nvmlDeviceVgpuCapability_t to be queried + * @param capResult A boolean for the queried capability indicating that feature is supported + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid, or \a capability is invalid, or \a capResult is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED the API is not supported in current state or \a device not in vGPU mode + * - \ref NVML_ERROR_UNKNOWN on any unexpected error +*/ +nvmlReturn_t DECLDIR nvmlDeviceGetVgpuCapabilities(nvmlDevice_t device, nvmlDeviceVgpuCapability_t capability, unsigned int *capResult); /** * Retrieve the supported vGPU types on a physical GPU (device). @@ -5964,6 +7083,24 @@ nvmlReturn_t DECLDIR nvmlVgpuTypeGetClass(nvmlVgpuTypeId_t vgpuTypeId, char *vgp */ nvmlReturn_t DECLDIR nvmlVgpuTypeGetName(nvmlVgpuTypeId_t vgpuTypeId, char *vgpuTypeName, unsigned int *size); +/** + * Retrieve the GPU Instance Profile ID for the given vGPU type ID. 
+ * The API will return a valid GPU Instance Profile ID for the MIG capable vGPU types, else INVALID_GPU_INSTANCE_PROFILE_ID is + * returned. + * + * For Kepler &tm; or newer fully supported devices. + * + * @param vgpuTypeId Handle to vGPU type + * @param gpuInstanceProfileId GPU Instance Profile ID + * + * @return + * - \ref NVML_SUCCESS successful completion + * - \ref NVML_ERROR_NOT_SUPPORTED if \a device is not in vGPU Host virtualization mode + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a gpuInstanceProfileId is NULL + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetGpuInstanceProfileId(nvmlVgpuTypeId_t vgpuTypeId, unsigned int *gpuInstanceProfileId); + /** * Retrieve the device ID of a vGPU type. * @@ -6230,6 +7367,8 @@ nvmlReturn_t DECLDIR nvmlVgpuInstanceGetVmDriverVersion(nvmlVgpuInstance_t vgpuI nvmlReturn_t DECLDIR nvmlVgpuInstanceGetFbUsage(nvmlVgpuInstance_t vgpuInstance, unsigned long long *fbUsage); /** + * @deprecated Use \ref nvmlVgpuInstanceGetLicenseInfo_v2. + * * Retrieve the current licensing state of the vGPU instance. * * If the vGPU is currently licensed, \a licensed is set to 1, otherwise it is set to 0. @@ -6437,10 +7576,67 @@ nvmlReturn_t DECLDIR nvmlVgpuInstanceGetFBCStats(nvmlVgpuInstance_t vgpuInstance */ nvmlReturn_t DECLDIR nvmlVgpuInstanceGetFBCSessions(nvmlVgpuInstance_t vgpuInstance, unsigned int *sessionCount, nvmlFBCSessionInfo_t *sessionInfo); +/** +* Retrieve the GPU Instance ID for the given vGPU Instance. +* The API will return a valid GPU Instance ID for MIG backed vGPU Instance, else INVALID_GPU_INSTANCE_ID is returned. +* +* For Kepler &tm; or newer fully supported devices. +* +* @param vgpuInstance Identifier of the target vGPU instance +* @param gpuInstanceId GPU Instance ID +* +* @return +* - \ref NVML_SUCCESS successful completion +* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized +* - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a gpuInstanceId is NULL. +* - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system +* - \ref NVML_ERROR_UNKNOWN on any unexpected error +*/ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetGpuInstanceId(nvmlVgpuInstance_t vgpuInstance, unsigned int *gpuInstanceId); + +/** +* Retrieves the PCI Id of the given vGPU Instance i.e. the PCI Id of the GPU as seen inside the VM. +* +* The vGPU PCI id is returned as "00000000:00:00.0" if NVIDIA driver is not installed on the vGPU instance. +* +* @param vgpuInstance Identifier of the target vGPU instance +* @param vgpuPciId Caller-supplied buffer to return vGPU PCI Id string +* @param length Size of the vgpuPciId buffer +* +* @return +* - \ref NVML_SUCCESS if vGPU PCI Id is sucessfully retrieved +* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized +* - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a vgpuPciId is NULL +* - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system +* - \ref NVML_ERROR_DRIVER_NOT_LOADED if NVIDIA driver is not running on the vGPU instance +* - \ref NVML_ERROR_INSUFFICIENT_SIZE if \a length is too small, \a length is set to required length +* - \ref NVML_ERROR_UNKNOWN on any unexpected error +*/ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetGpuPciId(nvmlVgpuInstance_t vgpuInstance, char *vgpuPciId, unsigned int *length); + +/** +* Retrieve the requested capability for a given vGPU type. 
Refer to the \a nvmlVgpuCapability_t structure +* for the specific capabilities that can be queried. The return value in \a capResult should be treated as +* a boolean, with a non-zero value indicating that the capability is supported. +* +* For Maxwell &tm; or newer fully supported devices. +* +* @param vgpuTypeId Handle to vGPU type +* @param capability Specifies the \a nvmlVgpuCapability_t to be queried +* @param capResult A boolean for the queried capability indicating that feature is supported +* +* @return +* - \ref NVML_SUCCESS successful completion +* - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized +* - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuTypeId is invalid, or \a capability is invalid, or \a capResult is NULL +* - \ref NVML_ERROR_UNKNOWN on any unexpected error +*/ +nvmlReturn_t DECLDIR nvmlVgpuTypeGetCapabilities(nvmlVgpuTypeId_t vgpuTypeId, nvmlVgpuCapability_t capability, unsigned int *capResult); + /** @} */ /***************************************************************************************************/ -/** @defgroup nvml GRID Virtualization Migration +/** @defgroup nvml vGPU Migration * This chapter describes operations that are associated with vGPU Migration. * @{ */ @@ -6496,7 +7692,7 @@ typedef enum nvmlVgpuVmCompatibility_enum NVML_VGPU_VM_COMPATIBILITY_COLD = 0x1, //!< vGPU is runnable from a cold / powered-off state (ACPI S5) NVML_VGPU_VM_COMPATIBILITY_HIBERNATE = 0x2, //!< vGPU is runnable from a hibernated state (ACPI S4) NVML_VGPU_VM_COMPATIBILITY_SLEEP = 0x4, //!< vGPU is runnable from a sleeped state (ACPI S3) - NVML_VGPU_VM_COMPATIBILITY_LIVE = 0x8, //!< vGPU is runnable from a live/paused (ACPI S0) + NVML_VGPU_VM_COMPATIBILITY_LIVE = 0x8 //!< vGPU is runnable from a live/paused (ACPI S0) } nvmlVgpuVmCompatibility_t; /** @@ -6508,7 +7704,7 @@ typedef enum nvmlVgpuPgpuCompatibilityLimitCode_enum NVML_VGPU_COMPATIBILITY_LIMIT_HOST_DRIVER = 0x1, //!< ompatibility is limited by host driver version. NVML_VGPU_COMPATIBILITY_LIMIT_GUEST_DRIVER = 0x2, //!< Compatibility is limited by guest driver version. NVML_VGPU_COMPATIBILITY_LIMIT_GPU = 0x4, //!< Compatibility is limited by GPU hardware. - NVML_VGPU_COMPATIBILITY_LIMIT_OTHER = 0x80000000, //!< Compatibility is limited by an undefined factor. + NVML_VGPU_COMPATIBILITY_LIMIT_OTHER = 0x80000000 //!< Compatibility is limited by an undefined factor. } nvmlVgpuPgpuCompatibilityLimitCode_t; /** @@ -6530,7 +7726,7 @@ typedef struct nvmlVgpuPgpuCompatibility_st * is available. The current state of these dependent fields is reflected in the info structure's \ref nvmlVgpuGuestInfoState_t field. * * The VMM may choose to read and save the vGPU's VM info as persistent metadata associated with the VM, and provide - * it to GRID Virtual GPU Manager when creating a vGPU for subsequent instances of the VM. + * it to Virtual GPU Manager when creating a vGPU for subsequent instances of the VM. * * The caller passes in a buffer via \a vgpuMetadata, with the size of the buffer in \a bufferSize. If the vGPU Metadata structure * is too large to fit in the supplied buffer, the function returns NVML_ERROR_INSUFFICIENT_SIZE with the size needed @@ -6615,6 +7811,90 @@ nvmlReturn_t DECLDIR nvmlGetVgpuCompatibility(nvmlVgpuMetadata_t *vgpuMetadata, */ nvmlReturn_t DECLDIR nvmlDeviceGetPgpuMetadataString(nvmlDevice_t device, char *pgpuMetadata, unsigned int *bufferSize); +/** + * Returns the vGPU Software scheduler logs. 
+ * \a pSchedulerLog points to a caller-allocated structure to contain the logs. The number of elements returned will + * never exceed \a NVML_SCHEDULER_SW_MAX_LOG_ENTRIES. + * + * To get the entire logs, call the function atleast 5 times a second. + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target \a device + * @param pSchedulerLog Reference in which \a pSchedulerLog is written + * + * @return + * - \ref NVML_SUCCESS vGPU scheduler logs were successfully obtained + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pSchedulerLog is NULL or \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported in current state or \a device not in vGPU host mode + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVgpuSchedulerLog(nvmlDevice_t device, nvmlVgpuSchedulerLog_t *pSchedulerLog); + +/** + * Returns the vGPU scheduler state. + * + * For Pascal &tm; or newer fully supported devices. + * + * @param device The identifier of the target \a device + * @param pSchedulerState Reference in which \a pSchedulerState is returned + * + * @return + * - \ref NVML_SUCCESS vGPU scheduler state is successfully obtained + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pSchedulerState is NULL or \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported in current state or \a device not in vGPU host mode + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVgpuSchedulerState(nvmlDevice_t device, nvmlVgpuSchedulerGetState_t *pSchedulerState); + +/** + * Sets the vGPU scheduler state. + * + * For Pascal &tm; or newer fully supported devices. + * + * The scheduler state change won’t persist across module load/unload. + * Scheduler state and params will be allowed to set only when no VM is running. + * In \a nvmlVgpuSchedulerSetState_t, IFF enableARRMode=1 then + * provide avgFactorForARR and frequency as input. If enableARRMode is disabled + * then provide timeslice as input. + * + * @param device The identifier of the target \a device + * @param pSchedulerState vGPU \a pSchedulerState to set + * + * @return + * - \ref NVML_SUCCESS vGPU scheduler state has been successfully set + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pSchedulerState is NULL or \a device is invalid + * - \ref NVML_ERROR_RESET_REQUIRED if setting \a pSchedulerState failed with fatal error, + * reboot is required to overcome from this error. + * - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported in current state or \a device not in vGPU host mode + * or if any vGPU instance currently exists on the \a device + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetVgpuSchedulerState(nvmlDevice_t device, nvmlVgpuSchedulerSetState_t *pSchedulerState); + +/** + * Returns the vGPU scheduler capabilities. + * The list of supported vGPU schedulers returned in \a nvmlVgpuSchedulerCapabilities_t is from + * the NVML_VGPU_SCHEDULER_POLICY_*. This list enumerates the supported scheduler policies + * if the engine is Graphics type. + * The other values in \a nvmlVgpuSchedulerCapabilities_t are also applicable if the engine is + * Graphics type. For other engine types, it is BEST EFFORT policy. + * If ARR is supported and enabled, scheduling frequency and averaging factor are applicable + * else timeSlice is applicable. + * + * For Pascal &tm; or newer fully supported devices. 
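+ *
+ * @note A minimal sketch, assuming \a device is a valid handle on a host in vGPU mode; the
+ *       individual capability fields are described by \ref nvmlVgpuSchedulerCapabilities_t:
+ * \code
+ * nvmlVgpuSchedulerCapabilities_t caps = { 0 };
+ * if (nvmlDeviceGetVgpuSchedulerCapabilities(device, &caps) == NVML_SUCCESS)
+ * {
+ *     // caps now lists the supported scheduler policies and, where ARR is supported,
+ *     // the allowed scheduling frequency and averaging factor ranges.
+ * }
+ * \endcode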
+ * + * @param device The identifier of the target \a device + * @param pCapabilities Reference in which \a pCapabilities is written + * + * @return + * - \ref NVML_SUCCESS vGPU scheduler capabilities were successfully obtained + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a pCapabilities is NULL or \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED The API is not supported in current state or \a device not in vGPU host mode + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetVgpuSchedulerCapabilities(nvmlDevice_t device, nvmlVgpuSchedulerCapabilities_t *pCapabilities); + /* * Virtual GPU (vGPU) version * @@ -6677,7 +7957,7 @@ nvmlReturn_t DECLDIR nvmlSetVgpuVersion(nvmlVgpuVersion_t *vgpuVersion); /** @} */ /***************************************************************************************************/ -/** @defgroup nvmlUtil GRID Virtualization Utilization and Accounting +/** @defgroup nvmlUtil vGPU Utilization and Accounting * This chapter describes operations that are associated with vGPU Utilization and Accounting. * @{ */ @@ -6884,44 +8164,61 @@ nvmlReturn_t DECLDIR nvmlVgpuInstanceGetAccountingStats(nvmlVgpuInstance_t vgpuI * - \ref NVML_ERROR_UNKNOWN on any unexpected error */ nvmlReturn_t DECLDIR nvmlVgpuInstanceClearAccountingPids(nvmlVgpuInstance_t vgpuInstance); + +/** + * Query the license information of the vGPU instance. + * + * For Maxwell &tm; or newer fully supported devices. + * + * @param vgpuInstance Identifier of the target vGPU instance + * @param licenseInfo Pointer to vGPU license information structure + * + * @return + * - \ref NVML_SUCCESS if information is successfully retrieved + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a vgpuInstance is 0, or \a licenseInfo is NULL + * - \ref NVML_ERROR_NOT_FOUND if \a vgpuInstance does not match a valid active vGPU instance on the system + * - \ref NVML_ERROR_DRIVER_NOT_LOADED if NVIDIA driver is not running on the vGPU instance + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetLicenseInfo_v2(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuLicenseInfo_t *licenseInfo); /** @} */ /***************************************************************************************************/ -/** @defgroup nvmlGpuBlacklistQueries GPU Blacklist Queries - * This chapter describes NVML operations that are associated with blacklisted GPUs. +/** @defgroup nvmlExcludedGpuQueries Excluded GPU Queries + * This chapter describes NVML operations that are associated with excluded GPUs. * @{ */ /***************************************************************************************************/ /** - * Blacklist GPU device information + * Excluded GPU device information **/ -typedef struct nvmlBlacklistDeviceInfo_st +typedef struct nvmlExcludedDeviceInfo_st { - nvmlPciInfo_t pciInfo; //!< The PCI information for the blacklisted GPU - char uuid[NVML_DEVICE_UUID_BUFFER_SIZE]; //!< The ASCII string UUID for the blacklisted GPU -} nvmlBlacklistDeviceInfo_t; + nvmlPciInfo_t pciInfo; //!< The PCI information for the excluded GPU + char uuid[NVML_DEVICE_UUID_BUFFER_SIZE]; //!< The ASCII string UUID for the excluded GPU +} nvmlExcludedDeviceInfo_t; /** - * Retrieves the number of blacklisted GPU devices in the system. + * Retrieves the number of excluded GPU devices in the system. * * For all products. 
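+ *
+ * @note A minimal sketch that walks the excluded-GPU list (variable names are placeholders):
+ * \code
+ * unsigned int excludedCount = 0;
+ * if (nvmlGetExcludedDeviceCount(&excludedCount) == NVML_SUCCESS)
+ * {
+ *     for (unsigned int i = 0; i < excludedCount; i++)
+ *     {
+ *         nvmlExcludedDeviceInfo_t info;
+ *         if (nvmlGetExcludedDeviceInfoByIndex(i, &info) == NVML_SUCCESS)
+ *         {
+ *             // info.uuid and info.pciInfo identify the excluded GPU.
+ *         }
+ *     }
+ * }
+ * \endcode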
* - * @param deviceCount Reference in which to return the number of blacklisted devices + * @param deviceCount Reference in which to return the number of excluded devices * * @return * - \ref NVML_SUCCESS if \a deviceCount has been set * - \ref NVML_ERROR_INVALID_ARGUMENT if \a deviceCount is NULL */ -nvmlReturn_t DECLDIR nvmlGetBlacklistDeviceCount(unsigned int *deviceCount); +nvmlReturn_t DECLDIR nvmlGetExcludedDeviceCount(unsigned int *deviceCount); /** - * Acquire the device information for a blacklisted device, based on its index. + * Acquire the device information for an excluded GPU device, based on its index. * * For all products. * * Valid indices are derived from the \a deviceCount returned by - * \ref nvmlGetBlacklistDeviceCount(). For example, if \a deviceCount is 2 the valid indices + * \ref nvmlGetExcludedDeviceCount(). For example, if \a deviceCount is 2 the valid indices * are 0 and 1, corresponding to GPU 0 and GPU 1. * * @param index The index of the target GPU, >= 0 and < \a deviceCount @@ -6931,9 +8228,9 @@ nvmlReturn_t DECLDIR nvmlGetBlacklistDeviceCount(unsigned int *deviceCount); * - \ref NVML_SUCCESS if \a device has been set * - \ref NVML_ERROR_INVALID_ARGUMENT if \a index is invalid or \a info is NULL * - * @see nvmlGetBlacklistDeviceCount + * @see nvmlGetExcludedDeviceCount */ -nvmlReturn_t DECLDIR nvmlGetBlacklistDeviceInfoByIndex(unsigned int index, nvmlBlacklistDeviceInfo_t *info); +nvmlReturn_t DECLDIR nvmlGetExcludedDeviceInfoByIndex(unsigned int index, nvmlExcludedDeviceInfo_t *info); /** @} */ @@ -6960,19 +8257,27 @@ nvmlReturn_t DECLDIR nvmlGetBlacklistDeviceInfoByIndex(unsigned int index, nvmlB * These macros should be passed to \ref nvmlDeviceGetGpuInstanceProfileInfo to retrieve the * detailed information about a GPU instance such as profile ID, engine counts. */ -#define NVML_GPU_INSTANCE_PROFILE_1_SLICE 0x0 -#define NVML_GPU_INSTANCE_PROFILE_2_SLICE 0x1 -#define NVML_GPU_INSTANCE_PROFILE_3_SLICE 0x2 -#define NVML_GPU_INSTANCE_PROFILE_4_SLICE 0x3 -#define NVML_GPU_INSTANCE_PROFILE_7_SLICE 0x4 -#define NVML_GPU_INSTANCE_PROFILE_COUNT 0x5 +#define NVML_GPU_INSTANCE_PROFILE_1_SLICE 0x0 +#define NVML_GPU_INSTANCE_PROFILE_2_SLICE 0x1 +#define NVML_GPU_INSTANCE_PROFILE_3_SLICE 0x2 +#define NVML_GPU_INSTANCE_PROFILE_4_SLICE 0x3 +#define NVML_GPU_INSTANCE_PROFILE_7_SLICE 0x4 +#define NVML_GPU_INSTANCE_PROFILE_8_SLICE 0x5 +#define NVML_GPU_INSTANCE_PROFILE_6_SLICE 0x6 +#define NVML_GPU_INSTANCE_PROFILE_1_SLICE_REV1 0x7 +#define NVML_GPU_INSTANCE_PROFILE_2_SLICE_REV1 0x8 +#define NVML_GPU_INSTANCE_PROFILE_1_SLICE_REV2 0x9 +#define NVML_GPU_INSTANCE_PROFILE_COUNT 0xA typedef struct nvmlGpuInstancePlacement_st { - unsigned int start; - unsigned int size; + unsigned int start; //!< Index of first occupied memory slice + unsigned int size; //!< Number of memory slices occupied } nvmlGpuInstancePlacement_t; +/** + * GPU instance profile information. + */ typedef struct nvmlGpuInstanceProfileInfo_st { unsigned int id; //!< Unique profile ID within the device @@ -6988,15 +8293,48 @@ typedef struct nvmlGpuInstanceProfileInfo_st unsigned long long memorySizeMB; //!< Memory size in MBytes } nvmlGpuInstanceProfileInfo_t; +/** + * GPU instance profile information (v2). + * + * Version 2 adds the \ref nvmlGpuInstanceProfileInfo_v2_t.version field + * to the start of the structure, and the \ref nvmlGpuInstanceProfileInfo_v2_t.name + * field to the end. This structure is not backwards-compatible with + * \ref nvmlGpuInstanceProfileInfo_t. 
+ */ +typedef struct nvmlGpuInstanceProfileInfo_v2_st +{ + unsigned int version; //!< Structure version identifier (set to \ref nvmlGpuInstanceProfileInfo_v2) + unsigned int id; //!< Unique profile ID within the device + unsigned int isP2pSupported; //!< Peer-to-Peer support + unsigned int sliceCount; //!< GPU Slice count + unsigned int instanceCount; //!< GPU instance count + unsigned int multiprocessorCount; //!< Streaming Multiprocessor count + unsigned int copyEngineCount; //!< Copy Engine count + unsigned int decoderCount; //!< Decoder Engine count + unsigned int encoderCount; //!< Encoder Engine count + unsigned int jpegCount; //!< JPEG Engine count + unsigned int ofaCount; //!< OFA Engine count + unsigned long long memorySizeMB; //!< Memory size in MBytes + char name[NVML_DEVICE_NAME_V2_BUFFER_SIZE]; //!< Profile name +} nvmlGpuInstanceProfileInfo_v2_t; + +/** + * Version identifier value for \ref nvmlGpuInstanceProfileInfo_v2_t.version. + */ +#define nvmlGpuInstanceProfileInfo_v2 NVML_STRUCT_VERSION(GpuInstanceProfileInfo, 2) + typedef struct nvmlGpuInstanceInfo_st { - nvmlDevice_t device; //!< Parent device - unsigned int id; //!< Unique instance ID within the device - unsigned int profileId; //!< Unique profile ID within the device - nvmlGpuInstancePlacement_t placement; //!< Placement for this instance + nvmlDevice_t device; //!< Parent device + unsigned int id; //!< Unique instance ID within the device + unsigned int profileId; //!< Unique profile ID within the device + nvmlGpuInstancePlacement_t placement; //!< Placement for this instance } nvmlGpuInstanceInfo_t; -typedef struct nvmlGpuInstance_st* nvmlGpuInstance_t; +typedef struct +{ + struct nvmlGpuInstance_st* handle; +} nvmlGpuInstance_t; /** * Compute instance profiles. @@ -7009,11 +8347,23 @@ typedef struct nvmlGpuInstance_st* nvmlGpuInstance_t; #define NVML_COMPUTE_INSTANCE_PROFILE_3_SLICE 0x2 #define NVML_COMPUTE_INSTANCE_PROFILE_4_SLICE 0x3 #define NVML_COMPUTE_INSTANCE_PROFILE_7_SLICE 0x4 -#define NVML_COMPUTE_INSTANCE_PROFILE_COUNT 0x5 +#define NVML_COMPUTE_INSTANCE_PROFILE_8_SLICE 0x5 +#define NVML_COMPUTE_INSTANCE_PROFILE_6_SLICE 0x6 +#define NVML_COMPUTE_INSTANCE_PROFILE_1_SLICE_REV1 0x7 +#define NVML_COMPUTE_INSTANCE_PROFILE_COUNT 0x8 #define NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_SHARED 0x0 //!< All the engines except multiprocessors would be shared #define NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_COUNT 0x1 +typedef struct nvmlComputeInstancePlacement_st +{ + unsigned int start; //!< Index of first occupied compute slice + unsigned int size; //!< Number of compute slices occupied +} nvmlComputeInstancePlacement_t; + +/** + * Compute instance profile information. + */ typedef struct nvmlComputeInstanceProfileInfo_st { unsigned int id; //!< Unique profile ID within the GPU instance @@ -7027,21 +8377,52 @@ typedef struct nvmlComputeInstanceProfileInfo_st unsigned int sharedOfaCount; //!< Shared OFA Engine count } nvmlComputeInstanceProfileInfo_t; +/** + * Compute instance profile information (v2). + * + * Version 2 adds the \ref nvmlComputeInstanceProfileInfo_v2_t.version field + * to the start of the structure, and the \ref nvmlComputeInstanceProfileInfo_v2_t.name + * field to the end. This structure is not backwards-compatible with + * \ref nvmlComputeInstanceProfileInfo_t. 
+ */ +typedef struct nvmlComputeInstanceProfileInfo_v2_st +{ + unsigned int version; //!< Structure version identifier (set to \ref nvmlComputeInstanceProfileInfo_v2) + unsigned int id; //!< Unique profile ID within the GPU instance + unsigned int sliceCount; //!< GPU Slice count + unsigned int instanceCount; //!< Compute instance count + unsigned int multiprocessorCount; //!< Streaming Multiprocessor count + unsigned int sharedCopyEngineCount; //!< Shared Copy Engine count + unsigned int sharedDecoderCount; //!< Shared Decoder Engine count + unsigned int sharedEncoderCount; //!< Shared Encoder Engine count + unsigned int sharedJpegCount; //!< Shared JPEG Engine count + unsigned int sharedOfaCount; //!< Shared OFA Engine count + char name[NVML_DEVICE_NAME_V2_BUFFER_SIZE]; //!< Profile name +} nvmlComputeInstanceProfileInfo_v2_t; + +/** + * Version identifier value for \ref nvmlComputeInstanceProfileInfo_v2_t.version. + */ +#define nvmlComputeInstanceProfileInfo_v2 NVML_STRUCT_VERSION(ComputeInstanceProfileInfo, 2) + typedef struct nvmlComputeInstanceInfo_st { - nvmlDevice_t device; //!< Parent device - nvmlGpuInstance_t gpuInstance; //!< Parent GPU instance - unsigned int id; //!< Unique instance ID within the GPU instance - unsigned int profileId; //!< Unique profile ID within the GPU instance + nvmlDevice_t device; //!< Parent device + nvmlGpuInstance_t gpuInstance; //!< Parent GPU instance + unsigned int id; //!< Unique instance ID within the GPU instance + unsigned int profileId; //!< Unique profile ID within the GPU instance + nvmlComputeInstancePlacement_t placement; //!< Placement for this instance within the GPU instance's compute slice range {0, sliceCount} } nvmlComputeInstanceInfo_t; -typedef struct nvmlComputeInstance_st* nvmlComputeInstance_t; +typedef struct +{ + struct nvmlComputeInstance_st* handle; +} nvmlComputeInstance_t; /** * Set MIG mode for the device. * - * For newer than Volta &tm; fully supported devices. - * Supported on Linux only. + * For Ampere &tm; or newer fully supported devices. * Requires root user. * * This mode determines whether a GPU instance can be created. @@ -7056,6 +8437,9 @@ typedef struct nvmlComputeInstance_st* nvmlComputeInstance_t; * unbind fails because the device isn't idle, \ref NVML_ERROR_IN_USE would be returned. The caller of this API * is expected to idle the device and retry setting the \a mode. * + * @note On Windows, only disabling MIG mode is supported. \a activationStatus would return \ref + * NVML_ERROR_NOT_SUPPORTED as GPU reset is not supported on Windows through this API. + * * @param device The identifier of the target device * @param mode The mode to be set, \ref NVML_DEVICE_MIG_DISABLE or * \ref NVML_DEVICE_MIG_ENABLE @@ -7073,8 +8457,7 @@ nvmlReturn_t DECLDIR nvmlDeviceSetMigMode(nvmlDevice_t device, unsigned int mode /** * Get MIG mode for the device. * - * For newer than Volta &tm; fully supported devices. - * Supported on Linux only. + * For Ampere &tm; or newer fully supported devices. * * Changing MIG modes may require device unbind or reset. The "pending" MIG mode refers to the target mode following the * next activation trigger. @@ -7098,9 +8481,8 @@ nvmlReturn_t DECLDIR nvmlDeviceGetMigMode(nvmlDevice_t device, unsigned int *cur * * Information provided by this API is immutable throughout the lifetime of a MIG mode. * - * For newer than Volta &tm; fully supported devices. + * For Ampere &tm; or newer fully supported devices. * Supported on Linux only. - * Requires privileged user. 
* * @param device The identifier of the target device * @param profile One of the NVML_GPU_INSTANCE_PROFILE_* @@ -7116,38 +8498,71 @@ nvmlReturn_t DECLDIR nvmlDeviceGetMigMode(nvmlDevice_t device, unsigned int *cur nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstanceProfileInfo(nvmlDevice_t device, unsigned int profile, nvmlGpuInstanceProfileInfo_t *info); +/** + * Versioned wrapper around \ref nvmlDeviceGetGpuInstanceProfileInfo that accepts a versioned + * \ref nvmlGpuInstanceProfileInfo_v2_t or later output structure. + * + * @note The caller must set the \ref nvmlGpuInstanceProfileInfo_v2_t.version field to the + * appropriate version prior to calling this function. For example: + * \code + * nvmlGpuInstanceProfileInfo_v2_t profileInfo = + * { .version = nvmlGpuInstanceProfileInfo_v2 }; + * nvmlReturn_t result = nvmlDeviceGetGpuInstanceProfileInfoV(device, + * profile, + * &profileInfo); + * \endcode + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param device The identifier of the target device + * @param profile One of the NVML_GPU_INSTANCE_PROFILE_* + * @param info Returns detailed profile information + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profile, \a info, or \a info->version are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or \a profile isn't supported + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstanceProfileInfoV(nvmlDevice_t device, unsigned int profile, + nvmlGpuInstanceProfileInfo_v2_t *info); + /** * Get GPU instance placements. * * A placement represents the location of a GPU instance within a device. This API only returns all the possible * placements for the given profile. + * A created GPU instance occupies memory slices described by its placement. Creation of new GPU instance will + * fail if there is overlap with the already occupied memory slices. * - * For newer than Volta &tm; fully supported devices. + * For Ampere &tm; or newer fully supported devices. * Supported on Linux only. * Requires privileged user. * * @param device The identifier of the target device * @param profileId The GPU instance profile ID. See \ref nvmlDeviceGetGpuInstanceProfileInfo - * @param placements Returns placements, the buffer must be large enough to accommodate - * the instances supported by the profile. - * See \ref nvmlDeviceGetGpuInstanceProfileInfo - * @param count The count of returned placements + * @param placements Returns placements allowed for the profile. Can be NULL to discover number + * of allowed placements for this profile. If non-NULL must be large enough + * to accommodate the placements supported by the profile. + * @param count Returns number of allowed placemenets for the profile. 
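+ *
+ * @note A minimal sketch that discovers the placement count first and then fetches the placements
+ *       (assumes \a device has MIG mode enabled and \a profileId was obtained from
+ *       \ref nvmlDeviceGetGpuInstanceProfileInfo; error handling is abbreviated):
+ * \code
+ * unsigned int placementCount = 0;
+ * nvmlReturn_t ret = nvmlDeviceGetGpuInstancePossiblePlacements_v2(device, profileId,
+ *                                                                  NULL, &placementCount);
+ * if (ret == NVML_SUCCESS && placementCount > 0)
+ * {
+ *     nvmlGpuInstancePlacement_t *placements = malloc(placementCount * sizeof(*placements));
+ *     ret = nvmlDeviceGetGpuInstancePossiblePlacements_v2(device, profileId,
+ *                                                         placements, &placementCount);
+ *     // Each entry reports the first occupied memory slice (start) and the slice count (size).
+ *     free(placements);
+ * }
+ * \endcode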
* * @return * - \ref NVML_SUCCESS Upon success * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized - * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profileId, \a placements or \a count are invalid + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profileId or \a count are invalid * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or \a profileId isn't supported * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation */ -nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstancePossiblePlacements(nvmlDevice_t device, unsigned int profileId, - nvmlGpuInstancePlacement_t *placements, - unsigned int *count); +nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstancePossiblePlacements_v2(nvmlDevice_t device, unsigned int profileId, + nvmlGpuInstancePlacement_t *placements, + unsigned int *count); /** * Get GPU instance profile capacity. * - * For newer than Volta &tm; fully supported devices. + * For Ampere &tm; or newer fully supported devices. * Supported on Linux only. * Requires privileged user. * @@ -7168,7 +8583,7 @@ nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstanceRemainingCapacity(nvmlDevice_t devi /** * Create GPU instance. * - * For newer than Volta &tm; fully supported devices. + * For Ampere &tm; or newer fully supported devices. * Supported on Linux only. * Requires privileged user. * @@ -7183,17 +8598,44 @@ nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstanceRemainingCapacity(nvmlDevice_t devi * - \ref NVML_SUCCESS Upon success * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profile, \a profileId or \a gpuInstance are invalid - * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or in vGPU guest * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation * - \ref NVML_ERROR_INSUFFICIENT_RESOURCES If the requested GPU instance could not be created */ nvmlReturn_t DECLDIR nvmlDeviceCreateGpuInstance(nvmlDevice_t device, unsigned int profileId, nvmlGpuInstance_t *gpuInstance); +/** + * Create GPU instance with the specified placement. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * Requires privileged user. + * + * If the parent device is unbound, reset or the GPU instance is destroyed explicitly, the GPU instance handle would + * become invalid. The GPU instance must be recreated to acquire a valid handle. + * + * @param device The identifier of the target device + * @param profileId The GPU instance profile ID. See \ref nvmlDeviceGetGpuInstanceProfileInfo + * @param placement The requested placement. 
See \ref nvmlDeviceGetGpuInstancePossiblePlacements_v2 + * @param gpuInstance Returns the GPU instance handle + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a device, \a profile, \a profileId, \a placement or \a gpuInstance + * are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or in vGPU guest + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + * - \ref NVML_ERROR_INSUFFICIENT_RESOURCES If the requested GPU instance could not be created + */ +nvmlReturn_t DECLDIR nvmlDeviceCreateGpuInstanceWithPlacement(nvmlDevice_t device, unsigned int profileId, + const nvmlGpuInstancePlacement_t *placement, + nvmlGpuInstance_t *gpuInstance); /** * Destroy GPU instance. * - * For newer than Volta &tm; fully supported devices. + * For Ampere &tm; or newer fully supported devices. * Supported on Linux only. * Requires privileged user. * @@ -7203,7 +8645,7 @@ nvmlReturn_t DECLDIR nvmlDeviceCreateGpuInstance(nvmlDevice_t device, unsigned i * - \ref NVML_SUCCESS Upon success * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized * - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance is invalid - * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or in vGPU guest * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation * - \ref NVML_ERROR_IN_USE If the GPU instance is in use. This error would be returned if processes * (e.g. CUDA application) or compute instances are active on the @@ -7214,7 +8656,7 @@ nvmlReturn_t DECLDIR nvmlGpuInstanceDestroy(nvmlGpuInstance_t gpuInstance); /** * Get GPU instances for given profile ID. * - * For newer than Volta &tm; fully supported devices. + * For Ampere &tm; or newer fully supported devices. * Supported on Linux only. * Requires privileged user. * @@ -7238,7 +8680,7 @@ nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstances(nvmlDevice_t device, unsigned int /** * Get GPU instances for given instance ID. * - * For newer than Volta &tm; fully supported devices. + * For Ampere &tm; or newer fully supported devices. * Supported on Linux only. * Requires privileged user. * @@ -7259,7 +8701,7 @@ nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstanceById(nvmlDevice_t device, unsigned /** * Get GPU instance information. * - * For newer than Volta &tm; fully supported devices. + * For Ampere &tm; or newer fully supported devices. * Supported on Linux only. * * @param gpuInstance The GPU instance handle @@ -7278,9 +8720,8 @@ nvmlReturn_t DECLDIR nvmlGpuInstanceGetInfo(nvmlGpuInstance_t gpuInstance, nvmlG * * Information provided by this API is immutable throughout the lifetime of a MIG mode. * - * For newer than Volta &tm; fully supported devices. + * For Ampere &tm; or newer fully supported devices. * Supported on Linux only. - * Requires privileged user. 
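The placement-aware entry points above compose into a short workflow: look up a profile, enumerate its possible placements, then create a GPU instance at a chosen slice offset. The following C fragment is an illustrative sketch of that sequence and is not part of the vendored header; it assumes MIG mode is already enabled on `device` and that the caller is privileged.

#include <stdio.h>
#include <nvml.h>

/* Illustrative sketch: create a GPU instance on the first placement reported
 * for a profile (e.g. NVML_GPU_INSTANCE_PROFILE_1_SLICE). Assumes MIG mode is
 * enabled on `device` and the process is privileged. */
static nvmlReturn_t createGpuInstanceAtFirstPlacement(nvmlDevice_t device, unsigned int profile)
{
    nvmlGpuInstanceProfileInfo_t info;
    nvmlReturn_t ret = nvmlDeviceGetGpuInstanceProfileInfo(device, profile, &info);
    if (ret != NVML_SUCCESS)
        return ret;

    /* First call with a NULL buffer to discover how many placements exist. */
    unsigned int count = 0;
    ret = nvmlDeviceGetGpuInstancePossiblePlacements_v2(device, info.id, NULL, &count);
    if (ret != NVML_SUCCESS)
        return ret;
    if (count == 0)
        return NVML_ERROR_INSUFFICIENT_RESOURCES;

    nvmlGpuInstancePlacement_t placements[count];
    ret = nvmlDeviceGetGpuInstancePossiblePlacements_v2(device, info.id, placements, &count);
    if (ret != NVML_SUCCESS)
        return ret;

    /* Creation fails with NVML_ERROR_INSUFFICIENT_RESOURCES if the chosen
     * placement overlaps memory slices that are already occupied, so a real
     * caller would typically try each candidate in turn. */
    nvmlGpuInstance_t gi;
    ret = nvmlDeviceCreateGpuInstanceWithPlacement(device, info.id, &placements[0], &gi);
    if (ret == NVML_SUCCESS)
        printf("created GPU instance at start=%u, size=%u\n", placements[0].start, placements[0].size);
    return ret;
}

The NULL-buffer-then-sized-buffer pattern follows the documentation of nvmlDeviceGetGpuInstancePossiblePlacements_v2 above.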
* * @param gpuInstance The identifier of the target GPU instance * @param profile One of the NVML_COMPUTE_INSTANCE_PROFILE_* @@ -7298,10 +8739,44 @@ nvmlReturn_t DECLDIR nvmlGpuInstanceGetComputeInstanceProfileInfo(nvmlGpuInstanc unsigned int engProfile, nvmlComputeInstanceProfileInfo_t *info); +/** + * Versioned wrapper around \ref nvmlGpuInstanceGetComputeInstanceProfileInfo that accepts a versioned + * \ref nvmlComputeInstanceProfileInfo_v2_t or later output structure. + * + * @note The caller must set the \ref nvmlGpuInstanceProfileInfo_v2_t.version field to the + * appropriate version prior to calling this function. For example: + * \code + * nvmlComputeInstanceProfileInfo_v2_t profileInfo = + * { .version = nvmlComputeInstanceProfileInfo_v2 }; + * nvmlReturn_t result = nvmlGpuInstanceGetComputeInstanceProfileInfoV(gpuInstance, + * profile, + * engProfile, + * &profileInfo); + * \endcode + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * + * @param gpuInstance The identifier of the target GPU instance + * @param profile One of the NVML_COMPUTE_INSTANCE_PROFILE_* + * @param engProfile One of the NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_* + * @param info Returns detailed profile information + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a profile, \a engProfile, \a info, or \a info->version are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a profile isn't supported + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + */ +nvmlReturn_t DECLDIR nvmlGpuInstanceGetComputeInstanceProfileInfoV(nvmlGpuInstance_t gpuInstance, unsigned int profile, + unsigned int engProfile, + nvmlComputeInstanceProfileInfo_v2_t *info); + /** * Get compute instance profile capacity. * - * For newer than Volta &tm; fully supported devices. + * For Ampere &tm; or newer fully supported devices. * Supported on Linux only. * Requires privileged user. * @@ -7320,10 +8795,41 @@ nvmlReturn_t DECLDIR nvmlGpuInstanceGetComputeInstanceProfileInfo(nvmlGpuInstanc nvmlReturn_t DECLDIR nvmlGpuInstanceGetComputeInstanceRemainingCapacity(nvmlGpuInstance_t gpuInstance, unsigned int profileId, unsigned int *count); +/** + * Get compute instance placements. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * Requires privileged user. + * + * A placement represents the location of a compute instance within a GPU instance. This API only returns all the possible + * placements for the given profile. + * A created compute instance occupies compute slices described by its placement. Creation of new compute instance will + * fail if there is overlap with the already occupied compute slices. + * + * @param gpuInstance The identifier of the target GPU instance + * @param profileId The compute instance profile ID. See \ref nvmlGpuInstanceGetComputeInstanceProfileInfo + * @param placements Returns placements allowed for the profile. Can be NULL to discover number + * of allowed placements for this profile. If non-NULL must be large enough + * to accommodate the placements supported by the profile. + * @param count Returns number of allowed placemenets for the profile. 
+ * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a profileId or \a count are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't have MIG mode enabled or \a profileId isn't supported + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + */ +nvmlReturn_t DECLDIR nvmlGpuInstanceGetComputeInstancePossiblePlacements(nvmlGpuInstance_t gpuInstance, + unsigned int profileId, + nvmlComputeInstancePlacement_t *placements, + unsigned int *count); + /** * Create compute instance. * - * For newer than Volta &tm; fully supported devices. + * For Ampere &tm; or newer fully supported devices. * Supported on Linux only. * Requires privileged user. * @@ -7348,10 +8854,40 @@ nvmlReturn_t DECLDIR nvmlGpuInstanceGetComputeInstanceRemainingCapacity(nvmlGpuI nvmlReturn_t DECLDIR nvmlGpuInstanceCreateComputeInstance(nvmlGpuInstance_t gpuInstance, unsigned int profileId, nvmlComputeInstance_t *computeInstance); +/** + * Create compute instance with the specified placement. + * + * For Ampere &tm; or newer fully supported devices. + * Supported on Linux only. + * Requires privileged user. + * + * If the parent device is unbound, reset or the parent GPU instance is destroyed or the compute instance is destroyed + * explicitly, the compute instance handle would become invalid. The compute instance must be recreated to acquire + * a valid handle. + * + * @param gpuInstance The identifier of the target GPU instance + * @param profileId The compute instance profile ID. + * See \ref nvmlGpuInstanceGetComputeInstanceProfileInfo + * @param placement The requested placement. See \ref nvmlGpuInstanceGetComputeInstancePossiblePlacements + * @param computeInstance Returns the compute instance handle + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_UNINITIALIZED If library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT If \a gpuInstance, \a profile, \a profileId or \a computeInstance + * are invalid + * - \ref NVML_ERROR_NOT_SUPPORTED If \a profileId isn't supported + * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation + * - \ref NVML_ERROR_INSUFFICIENT_RESOURCES If the requested compute instance could not be created + */ +nvmlReturn_t DECLDIR nvmlGpuInstanceCreateComputeInstanceWithPlacement(nvmlGpuInstance_t gpuInstance, unsigned int profileId, + const nvmlComputeInstancePlacement_t *placement, + nvmlComputeInstance_t *computeInstance); + /** * Destroy compute instance. * - * For newer than Volta &tm; fully supported devices. + * For Ampere &tm; or newer fully supported devices. * Supported on Linux only. * Requires privileged user. * @@ -7370,7 +8906,7 @@ nvmlReturn_t DECLDIR nvmlComputeInstanceDestroy(nvmlComputeInstance_t computeIns /** * Get compute instances for given profile ID. * - * For newer than Volta &tm; fully supported devices. + * For Ampere &tm; or newer fully supported devices. * Supported on Linux only. * Requires privileged user. * @@ -7396,7 +8932,7 @@ nvmlReturn_t DECLDIR nvmlGpuInstanceGetComputeInstances(nvmlGpuInstance_t gpuIns /** * Get compute instance for given instance ID. * - * For newer than Volta &tm; fully supported devices. + * For Ampere &tm; or newer fully supported devices. * Supported on Linux only. * Requires privileged user. 
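The compute instance variants mirror that flow one level down: enumerate placements for a compute instance profile inside an existing GPU instance, then create the compute instance at a specific offset. A hedged sketch, assuming `gpuInstance` is a valid handle, the shared engine profile is wanted, and the caller is privileged:

#include <nvml.h>

/* Illustrative sketch only: place a compute instance explicitly inside an
 * existing GPU instance. Profile selection (e.g. NVML_COMPUTE_INSTANCE_PROFILE_*)
 * is application-specific. */
static nvmlReturn_t createComputeInstanceAtFirstPlacement(nvmlGpuInstance_t gpuInstance,
                                                          unsigned int profile)
{
    nvmlComputeInstanceProfileInfo_t info;
    nvmlReturn_t ret = nvmlGpuInstanceGetComputeInstanceProfileInfo(
        gpuInstance, profile, NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_SHARED, &info);
    if (ret != NVML_SUCCESS)
        return ret;

    /* Same two-call pattern as the GPU instance placements query. */
    unsigned int count = 0;
    ret = nvmlGpuInstanceGetComputeInstancePossiblePlacements(gpuInstance, info.id, NULL, &count);
    if (ret != NVML_SUCCESS)
        return ret;
    if (count == 0)
        return NVML_ERROR_INSUFFICIENT_RESOURCES;

    nvmlComputeInstancePlacement_t placements[count];
    ret = nvmlGpuInstanceGetComputeInstancePossiblePlacements(gpuInstance, info.id, placements, &count);
    if (ret != NVML_SUCCESS)
        return ret;

    nvmlComputeInstance_t ci;
    return nvmlGpuInstanceCreateComputeInstanceWithPlacement(gpuInstance, info.id,
                                                             &placements[0], &ci);
}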
* @@ -7418,7 +8954,7 @@ nvmlReturn_t DECLDIR nvmlGpuInstanceGetComputeInstanceById(nvmlGpuInstance_t gpu /** * Get compute instance information. * - * For newer than Volta &tm; fully supported devices. + * For Ampere &tm; or newer fully supported devices. * Supported on Linux only. * * @param computeInstance The compute instance handle @@ -7430,7 +8966,7 @@ nvmlReturn_t DECLDIR nvmlGpuInstanceGetComputeInstanceById(nvmlGpuInstance_t gpu * - \ref NVML_ERROR_INVALID_ARGUMENT If \a computeInstance or \a info are invalid * - \ref NVML_ERROR_NO_PERMISSION If user doesn't have permission to perform the operation */ -nvmlReturn_t DECLDIR nvmlComputeInstanceGetInfo(nvmlComputeInstance_t computeInstance, nvmlComputeInstanceInfo_t *info); +nvmlReturn_t DECLDIR nvmlComputeInstanceGetInfo_v2(nvmlComputeInstance_t computeInstance, nvmlComputeInstanceInfo_t *info); /** * Test if the given handle refers to a MIG device. @@ -7439,7 +8975,7 @@ nvmlReturn_t DECLDIR nvmlComputeInstanceGetInfo(nvmlComputeInstance_t computeIns * These overloaded references can be used (with some restrictions) interchangeably * with a GPU device handle to execute queries at a per-compute instance granularity. * - * For newer than Volta &tm; fully supported devices. + * For Ampere &tm; or newer fully supported devices. * Supported on Linux only. * * @param device NVML handle to test @@ -7459,7 +8995,7 @@ nvmlReturn_t DECLDIR nvmlDeviceIsMigDeviceHandle(nvmlDevice_t device, unsigned i * * GPU instance IDs are unique per device and remain valid until the GPU instance is destroyed. * - * For newer than Volta &tm; fully supported devices. + * For Ampere &tm; or newer fully supported devices. * Supported on Linux only. * * @param device Target MIG device handle @@ -7480,7 +9016,7 @@ nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstanceId(nvmlDevice_t device, unsigned in * Compute instance IDs are unique per GPU instance and remain valid until the compute instance * is destroyed. * - * For newer than Volta &tm; fully supported devices. + * For Ampere &tm; or newer fully supported devices. * Supported on Linux only. * * @param device Target MIG device handle @@ -7500,7 +9036,7 @@ nvmlReturn_t DECLDIR nvmlDeviceGetComputeInstanceId(nvmlDevice_t device, unsigne * * Returns zero if MIG is not supported or enabled. * - * For newer than Volta &tm; fully supported devices. + * For Ampere &tm; or newer fully supported devices. * Supported on Linux only. * * @param device Target device handle @@ -7523,7 +9059,7 @@ nvmlReturn_t DECLDIR nvmlDeviceGetMaxMigDeviceCount(nvmlDevice_t device, unsigne * using this API. Handles may be reused and their properties can change in * the process. * - * For newer than Volta &tm; fully supported devices. + * For Ampere &tm; or newer fully supported devices. * Supported on Linux only. * * @param device Reference to the parent GPU device handle @@ -7544,7 +9080,7 @@ nvmlReturn_t DECLDIR nvmlDeviceGetMigDeviceHandleByIndex(nvmlDevice_t device, un /** * Get parent device handle from a MIG device handle. * - * For newer than Volta &tm; fully supported devices. + * For Ampere &tm; or newer fully supported devices. * Supported on Linux only. * * @param migDevice MIG device handle @@ -7559,8 +9095,537 @@ nvmlReturn_t DECLDIR nvmlDeviceGetMigDeviceHandleByIndex(nvmlDevice_t device, un */ nvmlReturn_t DECLDIR nvmlDeviceGetDeviceHandleFromMigDeviceHandle(nvmlDevice_t migDevice, nvmlDevice_t *device); +/** + * Get the type of the GPU Bus (PCIe, PCI, ...) 
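The MIG handle queries above (maximum MIG device count, per-index handles, and the GPU/compute instance ID lookups) are typically combined to enumerate the MIG children of a parent GPU before running per-instance queries. A minimal sketch of that loop, not part of the vendored header:

#include <stdio.h>
#include <nvml.h>

int main(void)
{
    if (nvmlInit_v2() != NVML_SUCCESS)
        return 1;

    nvmlDevice_t parent;
    if (nvmlDeviceGetHandleByIndex_v2(0, &parent) == NVML_SUCCESS) {
        unsigned int maxMig = 0;
        if (nvmlDeviceGetMaxMigDeviceCount(parent, &maxMig) == NVML_SUCCESS) {
            for (unsigned int i = 0; i < maxMig; i++) {
                nvmlDevice_t mig;
                /* Indexes without an active MIG device fail the lookup; skip them. */
                if (nvmlDeviceGetMigDeviceHandleByIndex(parent, i, &mig) != NVML_SUCCESS)
                    continue;

                unsigned int giId = 0, ciId = 0;
                if (nvmlDeviceGetGpuInstanceId(mig, &giId) == NVML_SUCCESS &&
                    nvmlDeviceGetComputeInstanceId(mig, &ciId) == NVML_SUCCESS)
                    printf("MIG device %u: GPU instance %u, compute instance %u\n", i, giId, ciId);
            }
        }
    }

    nvmlShutdown();
    return 0;
}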
+ *
+ * @param device                               The identifier of the target device
+ * @param type                                 Returns the PCI bus type
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 if the bus \a type is successfully retrieved
+ *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device is invalid or \a type is NULL
+ *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetBusType(nvmlDevice_t device, nvmlBusType_t *type);
+
+/**
+ * Retrieve performance monitor samples from the associated subdevice.
+ *
+ * @param device                               The identifier of the target device
+ * @param pDynamicPstatesInfo                  Reference in which to return the dynamic P-state utilization samples
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 if \a pDynamicPstatesInfo has been set
+ *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device is invalid or \a pDynamicPstatesInfo is NULL
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     if the device does not support this feature
+ *         - \ref NVML_ERROR_GPU_IS_LOST       if the target GPU has fallen off the bus or is otherwise inaccessible
+ *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetDynamicPstatesInfo(nvmlDevice_t device, nvmlGpuDynamicPstatesInfo_t *pDynamicPstatesInfo);
+
+/**
+ * Sets the speed of a specified fan.
+ *
+ * WARNING: This function changes the fan control policy to manual. It means that YOU have to monitor
+ *          the temperature and adjust the fan speed accordingly.
+ *          If you set the fan speed too low you can burn your GPU!
+ *          Use nvmlDeviceSetDefaultFanSpeed_v2 to restore the default control policy.
+ *
+ * For all CUDA-capable discrete products with fans that are Maxwell or newer.
+ *
+ * @param device                               The identifier of the target device
+ * @param fan                                  The index of the fan, starting at zero
+ * @param speed                                The target speed of the fan [0-100] in % of max speed
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 if the fan speed has been set
+ *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  if the device is not valid, the speed is outside acceptable ranges,
+ *                                             or the fan index doesn't reference an actual fan
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     if the device is older than Maxwell
+ *         - \ref NVML_ERROR_UNKNOWN           if there was an unexpected error
+ */ +nvmlReturn_t DECLDIR nvmlDeviceSetFanSpeed_v2(nvmlDevice_t device, unsigned int fan, unsigned int speed); + +/** + * Retrieve the GPCCLK VF offset value + * @param[in] device The identifier of the target device + * @param[out] offset The retrieved GPCCLK VF offset value + * + * @return + * - \ref NVML_SUCCESS if \a offset has been successfully queried + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGpcClkVfOffset(nvmlDevice_t device, int *offset); + +/** + * Set the GPCCLK VF offset value + * @param[in] device The identifier of the target device + * @param[in] offset The GPCCLK VF offset value to set + * + * @return + * - \ref NVML_SUCCESS if \a offset has been set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_GPU_IS_LOST if the target GPU has fallen off the bus or is otherwise inaccessible + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceSetGpcClkVfOffset(nvmlDevice_t device, int offset); + +/** + * Retrieve the MemClk (Memory Clock) VF offset value. + * @param[in] device The identifier of the target device + * @param[out] offset The retrieved MemClk VF offset value + * + * @return + * - \ref NVML_SUCCESS if \a offset has been successfully queried + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMemClkVfOffset(nvmlDevice_t device, int *offset); + +/** + * Set the MemClk (Memory Clock) VF offset value. It requires elevated privileges. 
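Because nvmlDeviceSetFanSpeed_v2 switches the selected fan to manual control, any use of it should be paired with a step that hands control back to the driver. The sketch below is illustrative only; it assumes the companion nvmlDeviceSetDefaultFanSpeed_v2 mentioned in the warning above takes the same (device, fan) pair.

#include <stdio.h>
#include <nvml.h>

/* Illustrative only: briefly force fan 0 to 80% and then hand control back to
 * the driver. Requires a Maxwell-or-newer discrete GPU with a fan and
 * sufficient privileges; leaving a GPU on a low manual speed risks damage. */
static void pulseFan(nvmlDevice_t device)
{
    nvmlReturn_t ret = nvmlDeviceSetFanSpeed_v2(device, 0 /* fan */, 80 /* percent */);
    if (ret != NVML_SUCCESS) {
        fprintf(stderr, "set fan speed failed: %s\n", nvmlErrorString(ret));
        return;
    }

    /* ... monitor temperature, do work ... */

    /* Restore the automatic fan control policy (assumed signature, see note above). */
    ret = nvmlDeviceSetDefaultFanSpeed_v2(device, 0);
    if (ret != NVML_SUCCESS)
        fprintf(stderr, "restore fan policy failed: %s\n", nvmlErrorString(ret));
}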
+ * @param[in]  device                          The identifier of the target device
+ * @param[in]  offset                          The MemClk VF offset value to set
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 if \a offset has been set
+ *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device is invalid or \a offset is NULL
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     if the device does not support this feature
+ *         - \ref NVML_ERROR_GPU_IS_LOST       if the target GPU has fallen off the bus or is otherwise inaccessible
+ *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceSetMemClkVfOffset(nvmlDevice_t device, int offset);
+
+/**
+ * Retrieve the min and max clocks of a clock domain for a given P-state.
+ *
+ * @param device                               The identifier of the target device
+ * @param type                                 Clock domain
+ * @param pstate                               P-state to query
+ * @param minClockMHz                          Reference in which to return the min clock frequency
+ * @param maxClockMHz                          Reference in which to return the max clock frequency
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 if everything worked
+ *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device, \a type or \a pstate are invalid or both
+ *                                             \a minClockMHz and \a maxClockMHz are NULL
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     if the device does not support this feature
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetMinMaxClockOfPState(nvmlDevice_t device, nvmlClockType_t type, nvmlPstates_t pstate,
+                                                      unsigned int * minClockMHz, unsigned int * maxClockMHz);
+
+/**
+ * Get all supported Performance States (P-States) for the device.
+ *
+ * The returned array contains a contiguous list of the valid P-States supported by
+ * the device. If the number of supported P-States is fewer than the size of the array
+ * supplied, the missing elements contain \a NVML_PSTATE_UNKNOWN.
+ *
+ * The number of elements in the returned list will never exceed \a NVML_MAX_GPU_PERF_PSTATES.
+ *
+ * @param device                               The identifier of the target device
+ * @param pstates                              Container to return the list of performance states
+ *                                             supported by the device
+ * @param size                                 Size of the supplied \a pstates array in bytes
+ *
+ * @return
+ *         - \ref NVML_SUCCESS                 if the \a pstates array has been retrieved
+ *         - \ref NVML_ERROR_INSUFFICIENT_SIZE if the container supplied was not large enough to
+ *                                             hold the resulting list
+ *         - \ref NVML_ERROR_UNINITIALIZED     if the library has not been successfully initialized
+ *         - \ref NVML_ERROR_INVALID_ARGUMENT  if \a device or \a pstates is invalid
+ *         - \ref NVML_ERROR_NOT_SUPPORTED     if the device does not support performance state readings
+ *         - \ref NVML_ERROR_UNKNOWN           on any unexpected error
+ */
+nvmlReturn_t DECLDIR nvmlDeviceGetSupportedPerformanceStates(nvmlDevice_t device,
+                                                             nvmlPstates_t *pstates, unsigned int size);
+
+/**
+ * Retrieve the GPCCLK min max VF offset value.
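The two P-state queries above compose naturally: fetch the supported P-states, then ask for the clock range of each. A small illustrative sketch (following the documented convention that `size` is the byte size of the supplied array), assuming `device` is a valid handle:

#include <stdio.h>
#include <nvml.h>

/* Illustrative sketch: print the graphics clock range of every supported P-state. */
static void printPstateClockRanges(nvmlDevice_t device)
{
    nvmlPstates_t pstates[NVML_MAX_GPU_PERF_PSTATES];
    nvmlReturn_t ret = nvmlDeviceGetSupportedPerformanceStates(device, pstates, sizeof(pstates));
    if (ret != NVML_SUCCESS)
        return;

    for (unsigned int i = 0; i < NVML_MAX_GPU_PERF_PSTATES; i++) {
        if (pstates[i] == NVML_PSTATE_UNKNOWN)
            break;  /* remaining entries are padding */

        unsigned int minMHz = 0, maxMHz = 0;
        if (nvmlDeviceGetMinMaxClockOfPState(device, NVML_CLOCK_GRAPHICS, pstates[i],
                                             &minMHz, &maxMHz) == NVML_SUCCESS)
            printf("P%d: graphics clock %u-%u MHz\n", (int)pstates[i], minMHz, maxMHz);
    }
}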
+ * @param[in] device The identifier of the target device + * @param[out] minOffset The retrieved GPCCLK VF min offset value + * @param[out] maxOffset The retrieved GPCCLK VF max offset value + * + * @return + * - \ref NVML_SUCCESS if \a offset has been successfully queried + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGpcClkMinMaxVfOffset(nvmlDevice_t device, + int *minOffset, int *maxOffset); + +/** + * Retrieve the MemClk (Memory Clock) min max VF offset value. + * @param[in] device The identifier of the target device + * @param[out] minOffset The retrieved MemClk VF min offset value + * @param[out] maxOffset The retrieved MemClk VF max offset value + * + * @return + * - \ref NVML_SUCCESS if \a offset has been successfully queried + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a offset is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if the device does not support this feature + * - \ref NVML_ERROR_UNKNOWN on any unexpected error + */ +nvmlReturn_t DECLDIR nvmlDeviceGetMemClkMinMaxVfOffset(nvmlDevice_t device, + int *minOffset, int *maxOffset); + +/** + * Get fabric information associated with the device. + * + * %HOPPER_OR_NEWER% + * + * On Hopper + NVSwitch systems, GPU is registered with the NVIDIA Fabric Manager + * Upon successful registration, the GPU is added to the NVLink fabric to enable + * peer-to-peer communication. + * This API reports the current state of the GPU in the NVLink fabric + * along with other useful information. + * + * @param device The identifier of the target device + * @param gpuFabricInfo Information about GPU fabric state + * + * @return + * - \ref NVML_SUCCESS Upon success + * - \ref NVML_ERROR_NOT_SUPPORTED If \a device doesn't support gpu fabric + */ +nvmlReturn_t DECLDIR nvmlDeviceGetGpuFabricInfo(nvmlDevice_t device, nvmlGpuFabricInfo_t *gpuFabricInfo); + /** @} */ +/***************************************************************************************************/ +/** @defgroup GPM NVML GPM + * @{ + */ +/***************************************************************************************************/ +/** @defgroup nvmlGpmEnums GPM Enums + * @{ + */ +/***************************************************************************************************/ + +/* GPM Metric Identifiers */ +typedef enum +{ + NVML_GPM_METRIC_GRAPHICS_UTIL = 1, /* Percentage of time any compute/graphics app was active on the GPU. 0.0 - 100.0 */ + NVML_GPM_METRIC_SM_UTIL = 2, /* Percentage of SMs that were busy. 0.0 - 100.0 */ + NVML_GPM_METRIC_SM_OCCUPANCY = 3, /* Percentage of warps that were active vs theoretical maximum. 0.0 - 100.0 */ + NVML_GPM_METRIC_INTEGER_UTIL = 4, /* Percentage of time the GPU's SMs were doing integer operations. 0.0 - 100.0 */ + NVML_GPM_METRIC_ANY_TENSOR_UTIL = 5, /* Percentage of time the GPU's SMs were doing ANY tensor operations. 0.0 - 100.0 */ + NVML_GPM_METRIC_DFMA_TENSOR_UTIL = 6, /* Percentage of time the GPU's SMs were doing DFMA tensor operations. 0.0 - 100.0 */ + NVML_GPM_METRIC_HMMA_TENSOR_UTIL = 7, /* Percentage of time the GPU's SMs were doing HMMA tensor operations. 
0.0 - 100.0 */ + NVML_GPM_METRIC_IMMA_TENSOR_UTIL = 9, /* Percentage of time the GPU's SMs were doing IMMA tensor operations. 0.0 - 100.0 */ + NVML_GPM_METRIC_DRAM_BW_UTIL = 10, /* Percentage of DRAM bw used vs theoretical maximum. 0.0 - 100.0 */ + NVML_GPM_METRIC_FP64_UTIL = 11, /* Percentage of time the GPU's SMs were doing non-tensor FP64 math. 0.0 - 100.0 */ + NVML_GPM_METRIC_FP32_UTIL = 12, /* Percentage of time the GPU's SMs were doing non-tensor FP32 math. 0.0 - 100.0 */ + NVML_GPM_METRIC_FP16_UTIL = 13, /* Percentage of time the GPU's SMs were doing non-tensor FP16 math. 0.0 - 100.0 */ + NVML_GPM_METRIC_PCIE_TX_PER_SEC = 20, /* PCIe traffic from this GPU in MiB/sec */ + NVML_GPM_METRIC_PCIE_RX_PER_SEC = 21, /* PCIe traffic to this GPU in MiB/sec */ + NVML_GPM_METRIC_NVDEC_0_UTIL = 30, /* Percent utilization of NVDEC 0. 0.0 - 100.0 */ + NVML_GPM_METRIC_NVDEC_1_UTIL = 31, /* Percent utilization of NVDEC 1. 0.0 - 100.0 */ + NVML_GPM_METRIC_NVDEC_2_UTIL = 32, /* Percent utilization of NVDEC 2. 0.0 - 100.0 */ + NVML_GPM_METRIC_NVDEC_3_UTIL = 33, /* Percent utilization of NVDEC 3. 0.0 - 100.0 */ + NVML_GPM_METRIC_NVDEC_4_UTIL = 34, /* Percent utilization of NVDEC 4. 0.0 - 100.0 */ + NVML_GPM_METRIC_NVDEC_5_UTIL = 35, /* Percent utilization of NVDEC 5. 0.0 - 100.0 */ + NVML_GPM_METRIC_NVDEC_6_UTIL = 36, /* Percent utilization of NVDEC 6. 0.0 - 100.0 */ + NVML_GPM_METRIC_NVDEC_7_UTIL = 37, /* Percent utilization of NVDEC 7. 0.0 - 100.0 */ + NVML_GPM_METRIC_NVJPG_0_UTIL = 40, /* Percent utilization of NVJPG 0. 0.0 - 100.0 */ + NVML_GPM_METRIC_NVJPG_1_UTIL = 41, /* Percent utilization of NVJPG 1. 0.0 - 100.0 */ + NVML_GPM_METRIC_NVJPG_2_UTIL = 42, /* Percent utilization of NVJPG 2. 0.0 - 100.0 */ + NVML_GPM_METRIC_NVJPG_3_UTIL = 43, /* Percent utilization of NVJPG 3. 0.0 - 100.0 */ + NVML_GPM_METRIC_NVJPG_4_UTIL = 44, /* Percent utilization of NVJPG 4. 0.0 - 100.0 */ + NVML_GPM_METRIC_NVJPG_5_UTIL = 45, /* Percent utilization of NVJPG 5. 0.0 - 100.0 */ + NVML_GPM_METRIC_NVJPG_6_UTIL = 46, /* Percent utilization of NVJPG 6. 0.0 - 100.0 */ + NVML_GPM_METRIC_NVJPG_7_UTIL = 47, /* Percent utilization of NVJPG 7. 0.0 - 100.0 */ + NVML_GPM_METRIC_NVOFA_0_UTIL = 50, /* Percent utilization of NVOFA 0. 
0.0 - 100.0 */ + NVML_GPM_METRIC_NVLINK_TOTAL_RX_PER_SEC = 60, /* NvLink read bandwidth for all links in MiB/sec */ + NVML_GPM_METRIC_NVLINK_TOTAL_TX_PER_SEC = 61, /* NvLink write bandwidth for all links in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L0_RX_PER_SEC = 62, /* NvLink read bandwidth for link 0 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L0_TX_PER_SEC = 63, /* NvLink write bandwidth for link 0 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L1_RX_PER_SEC = 64, /* NvLink read bandwidth for link 1 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L1_TX_PER_SEC = 65, /* NvLink write bandwidth for link 1 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L2_RX_PER_SEC = 66, /* NvLink read bandwidth for link 2 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L2_TX_PER_SEC = 67, /* NvLink write bandwidth for link 2 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L3_RX_PER_SEC = 68, /* NvLink read bandwidth for link 3 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L3_TX_PER_SEC = 69, /* NvLink write bandwidth for link 3 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L4_RX_PER_SEC = 70, /* NvLink read bandwidth for link 4 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L4_TX_PER_SEC = 71, /* NvLink write bandwidth for link 4 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L5_RX_PER_SEC = 72, /* NvLink read bandwidth for link 5 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L5_TX_PER_SEC = 73, /* NvLink write bandwidth for link 5 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L6_RX_PER_SEC = 74, /* NvLink read bandwidth for link 6 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L6_TX_PER_SEC = 75, /* NvLink write bandwidth for link 6 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L7_RX_PER_SEC = 76, /* NvLink read bandwidth for link 7 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L7_TX_PER_SEC = 77, /* NvLink write bandwidth for link 7 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L8_RX_PER_SEC = 78, /* NvLink read bandwidth for link 8 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L8_TX_PER_SEC = 79, /* NvLink write bandwidth for link 8 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L9_RX_PER_SEC = 80, /* NvLink read bandwidth for link 9 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L9_TX_PER_SEC = 81, /* NvLink write bandwidth for link 9 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L10_RX_PER_SEC = 82, /* NvLink read bandwidth for link 10 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L10_TX_PER_SEC = 83, /* NvLink write bandwidth for link 10 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L11_RX_PER_SEC = 84, /* NvLink read bandwidth for link 11 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L11_TX_PER_SEC = 85, /* NvLink write bandwidth for link 11 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L12_RX_PER_SEC = 86, /* NvLink read bandwidth for link 12 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L12_TX_PER_SEC = 87, /* NvLink write bandwidth for link 12 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L13_RX_PER_SEC = 88, /* NvLink read bandwidth for link 13 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L13_TX_PER_SEC = 89, /* NvLink write bandwidth for link 13 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L14_RX_PER_SEC = 90, /* NvLink read bandwidth for link 14 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L14_TX_PER_SEC = 91, /* NvLink write bandwidth for link 14 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L15_RX_PER_SEC = 92, /* NvLink read bandwidth for link 15 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L15_TX_PER_SEC = 93, /* NvLink write bandwidth for link 15 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L16_RX_PER_SEC = 94, /* NvLink read bandwidth for link 16 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L16_TX_PER_SEC = 95, /* NvLink write bandwidth for link 16 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L17_RX_PER_SEC = 96, /* NvLink read 
bandwidth for link 17 in MiB/sec */ + NVML_GPM_METRIC_NVLINK_L17_TX_PER_SEC = 97, /* NvLink write bandwidth for link 17 in MiB/sec */ + NVML_GPM_METRIC_MAX = 98, /* Maximum value above +1. Note that changing this + should also change NVML_GPM_METRICS_GET_VERSION + due to struct size change */ +} nvmlGpmMetricId_t; + +/** @} */ // @defgroup nvmlGpmEnums + + +/***************************************************************************************************/ +/** @defgroup nvmlGpmStructs GPM Structs + * @{ + */ +/***************************************************************************************************/ + +/* Handle to an allocated GPM sample allocated with nvmlGpmSampleAlloc() + Free this with nvmlGpmSampleFree() */ +typedef struct +{ + struct nvmlGpmSample_st* handle; +} nvmlGpmSample_t; + +typedef struct { + char *shortName; + char *longName; + char *unit; +} nvmlGpmMetricMetricInfo_t; + +typedef struct +{ + unsigned int metricId; /* IN: NVML_GPM_METRIC_? #define of which metric to retrieve */ + nvmlReturn_t nvmlReturn; /* OUT: Status of this metric. If this is nonzero, then value is not valid */ + double value; /* OUT: Value of this metric. Is only valid if nvmlReturn is 0 (NVML_SUCCESS) */ + nvmlGpmMetricMetricInfo_t metricInfo; /* OUT: Metric name and unit. Those can be NULL if not defined */ +} nvmlGpmMetric_t; + +typedef struct +{ + unsigned int version; /* IN: Set to NVML_GPM_METRICS_GET_VERSION */ + unsigned int numMetrics; /* IN: How many metrics to retrieve in metrics[] */ + nvmlGpmSample_t sample1; /* IN: Sample buffer */ + nvmlGpmSample_t sample2; /* IN: Sample buffer */ + nvmlGpmMetric_t metrics[NVML_GPM_METRIC_MAX]; /* IN/OUT: Array of metrics. Set metricId on call. + see nvmlReturn and value on return */ +} nvmlGpmMetricsGet_t; + +#define NVML_GPM_METRICS_GET_VERSION 1 + +typedef struct +{ + unsigned int version; /* IN: Set to NVML_GPM_SUPPORT_VERSION */ + unsigned int isSupportedDevice; /* OUT: Indicates device support */ +} nvmlGpmSupport_t; + +#define NVML_GPM_SUPPORT_VERSION 1 + +/** @} */ // @defgroup nvmlGPMStructs + +/***************************************************************************************************/ +/** @defgroup nvmlGpmFunctions GPM Functions + * @{ + */ +/***************************************************************************************************/ + +/** + * Calculate GPM metrics from two samples. + * + * + * @param metricsGet IN/OUT: populated nvmlGpmMetricsGet_t struct + * + * %HOPPER_OR_NEWER% + * + * @return + * - \ref NVML_SUCCESS on success + * - Nonzero NVML_ERROR_? enum on error + */ +nvmlReturn_t DECLDIR nvmlGpmMetricsGet(nvmlGpmMetricsGet_t *metricsGet); + + +/** + * Free an allocated sample buffer that was allocated with \ref nvmlGpmSampleAlloc() + * + * %HOPPER_OR_NEWER% + * + * @param gpmSample Sample to free + * + * @return + * - \ref NVML_SUCCESS on success + * - \ref NVML_ERROR_INVALID_ARGUMENT if an invalid pointer is provided + */ +nvmlReturn_t DECLDIR nvmlGpmSampleFree(nvmlGpmSample_t gpmSample); + + +/** + * Allocate a sample buffer to be used with NVML GPM . 
You will need to allocate + * at least two of these buffers to use with the NVML GPM feature + * + * %HOPPER_OR_NEWER% + * + * @param gpmSample Where the allocated sample will be stored + * + * @return + * - \ref NVML_SUCCESS on success + * - \ref NVML_ERROR_INVALID_ARGUMENT if an invalid pointer is provided + * - \ref NVML_ERROR_MEMORY if system memory is insufficient + */ +nvmlReturn_t DECLDIR nvmlGpmSampleAlloc(nvmlGpmSample_t *gpmSample); + +/** + * Read a sample of GPM metrics into the provided \a gpmSample buffer. After + * two samples are gathered, you can call nvmlGpmMetricGet on those samples to + * retrive metrics + * + * %HOPPER_OR_NEWER% + * + * @param device Device to get samples for + * @param gpmSample Buffer to read samples into + * + * @return + * - \ref NVML_SUCCESS on success + * - Nonzero NVML_ERROR_? enum on error + */ +nvmlReturn_t DECLDIR nvmlGpmSampleGet(nvmlDevice_t device, nvmlGpmSample_t gpmSample); + +/** + * Read a sample of GPM metrics into the provided \a gpmSample buffer for a MIG GPU Instance. + * + * After two samples are gathered, you can call nvmlGpmMetricGet on those + * samples to retrive metrics + * + * %HOPPER_OR_NEWER% + * + * @param device Device to get samples for + * @param gpuInstanceId MIG GPU Instance ID + * @param gpmSample Buffer to read samples into + * + * @return + * - \ref NVML_SUCCESS on success + * - Nonzero NVML_ERROR_? enum on error + */ +nvmlReturn_t DECLDIR nvmlGpmMigSampleGet(nvmlDevice_t device, unsigned int gpuInstanceId, nvmlGpmSample_t gpmSample); + +/** + * Indicate whether the supplied device supports GPM + * + * @param device NVML device to query for + * @param gpmSupport Structure to indicate GPM support. Indicates + * GPM support per system for the supplied device + * + * @return + * - NVML_SUCCESS on success + * - Nonzero NVML_ERROR_? enum if there is an error in processing the query + */ +nvmlReturn_t DECLDIR nvmlGpmQueryDeviceSupport(nvmlDevice_t device, nvmlGpmSupport_t *gpmSupport); + +/** @} */ // @defgroup nvmlGpmFunctions +/** @} */ // @defgroup GPM + +/***************************************************************************************************/ +/** @defgroup nvmlDevice definitions related to Counter Collection Unit + * @{ + */ +/***************************************************************************************************/ + +/* CCU Stream State */ +#define NVML_COUNTER_COLLECTION_UNIT_STREAM_STATE_DISABLE 0 +#define NVML_COUNTER_COLLECTION_UNIT_STREAM_STATE_ENABLE 1 + +/** + * Get counter collection unit stream state. + * + * %HOPPER_OR_NEWER% + * Supported on Linux, Windows TCC. + * + * @param device The identifier of the target device + * @param state Returns counter collection unit stream state + * NVML_COUNTER_COLLECTION_UNIT_STREAM_STATE_DISABLE or + * NVML_COUNTER_COLLECTION_UNIT_STREAM_STATE_ENABLE + * + * @return + * - \ref NVML_SUCCESS if \a current counter collection unit stream state were successfully queried + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a state is NULL + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + */ +nvmlReturn_t DECLDIR nvmlDeviceCcuGetStreamState(nvmlDevice_t device, unsigned int *state); + +/** + * Set counter collection unit stream state. + * + * %HOPPER_OR_NEWER% + * Supported on Linux, Windows TCC. 
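GPM is a delta-based interface: metrics are computed from a pair of samples taken some interval apart, so callers allocate two buffers, sample twice, and then call nvmlGpmMetricsGet on the pair. The following is a hedged, illustrative sketch of that sequence for a Hopper-or-newer device handle, using only the structs and prototypes shown above:

#include <stdio.h>
#include <unistd.h>
#include <nvml.h>

/* Illustrative sketch of the GPM sample/sample/compute pattern. Assumes an
 * initialized library and a Hopper-or-newer `device` handle. */
static void printSmUtil(nvmlDevice_t device)
{
    nvmlGpmSupport_t support = { .version = NVML_GPM_SUPPORT_VERSION };
    if (nvmlGpmQueryDeviceSupport(device, &support) != NVML_SUCCESS || !support.isSupportedDevice)
        return;

    nvmlGpmSample_t s1, s2;
    if (nvmlGpmSampleAlloc(&s1) != NVML_SUCCESS)
        return;
    if (nvmlGpmSampleAlloc(&s2) != NVML_SUCCESS) {
        nvmlGpmSampleFree(s1);
        return;
    }

    if (nvmlGpmSampleGet(device, s1) == NVML_SUCCESS) {
        sleep(1); /* metrics are computed over the interval between the two samples */
        if (nvmlGpmSampleGet(device, s2) == NVML_SUCCESS) {
            nvmlGpmMetricsGet_t get = {
                .version    = NVML_GPM_METRICS_GET_VERSION,
                .numMetrics = 1,
                .sample1    = s1,
                .sample2    = s2,
            };
            get.metrics[0].metricId = NVML_GPM_METRIC_SM_UTIL;
            if (nvmlGpmMetricsGet(&get) == NVML_SUCCESS && get.metrics[0].nvmlReturn == NVML_SUCCESS)
                printf("SM utilization: %.1f%%\n", get.metrics[0].value);
        }
    }

    nvmlGpmSampleFree(s1);
    nvmlGpmSampleFree(s2);
}

For a MIG GPU instance, nvmlGpmMigSampleGet takes the place of nvmlGpmSampleGet with the GPU instance ID as the extra argument; the rest of the sequence is unchanged.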
+ * + * @param device The identifier of the target device + * @param state Counter collection unit stream state, + * NVML_COUNTER_COLLECTION_UNIT_STREAM_STATE_DISABLE or + * NVML_COUNTER_COLLECTION_UNIT_STREAM_STATE_ENABLE + * + * @return + * - \ref NVML_SUCCESS if \a current counter collection unit stream state is successfully set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + */ +nvmlReturn_t DECLDIR nvmlDeviceCcuSetStreamState(nvmlDevice_t device, unsigned int state); + +/** @} */ // @defgroup CCU + +#define NVML_NVLINK_POWER_STATE_HIGH_SPEED 0x0 +#define NVML_NVLINK_POWER_STATE_LOW 0x1 + +#define NVML_NVLINK_LOW_POWER_THRESHOLD_MIN 0x1 +#define NVML_NVLINK_LOW_POWER_THRESHOLD_MAX 0x1FFF +#define NVML_NVLINK_LOW_POWER_THRESHOLD_RESET 0xFFFFFFFF + +/* Structure containing Low Power parameters */ +typedef struct nvmlNvLinkPowerThres_st +{ + unsigned int lowPwrThreshold; //!< Low power threshold (in units of 100us) +} nvmlNvLinkPowerThres_t; + +/** + * Set NvLink Low Power Threshold for device. + * + * %HOPPER_OR_NEWER% + * + * @param device The identifier of the target device + * @param info Reference to \a nvmlNvLinkPowerThres_t struct + * input parameters + * + * @return + * - \ref NVML_SUCCESS if the \a Threshold is successfully set + * - \ref NVML_ERROR_UNINITIALIZED if the library has not been successfully initialized + * - \ref NVML_ERROR_INVALID_ARGUMENT if \a device is invalid or \a Threshold is not within range + * - \ref NVML_ERROR_NOT_SUPPORTED if this query is not supported by the device + * + **/ +nvmlReturn_t DECLDIR nvmlDeviceSetNvLinkDeviceLowPowerThreshold(nvmlDevice_t device, nvmlNvLinkPowerThres_t *info); + /** * NVML API versioning support */ @@ -7575,16 +9640,31 @@ nvmlReturn_t DECLDIR nvmlDeviceGetPciInfo_v2(nvmlDevice_t device, nvmlPciInfo_t nvmlReturn_t DECLDIR nvmlDeviceGetNvLinkRemotePciInfo(nvmlDevice_t device, unsigned int link, nvmlPciInfo_t *pci); nvmlReturn_t DECLDIR nvmlDeviceGetGridLicensableFeatures(nvmlDevice_t device, nvmlGridLicensableFeatures_t *pGridLicensableFeatures); nvmlReturn_t DECLDIR nvmlDeviceGetGridLicensableFeatures_v2(nvmlDevice_t device, nvmlGridLicensableFeatures_t *pGridLicensableFeatures); +nvmlReturn_t DECLDIR nvmlDeviceGetGridLicensableFeatures_v3(nvmlDevice_t device, nvmlGridLicensableFeatures_t *pGridLicensableFeatures); nvmlReturn_t DECLDIR nvmlDeviceRemoveGpu(nvmlPciInfo_t *pciInfo); nvmlReturn_t DECLDIR nvmlEventSetWait(nvmlEventSet_t set, nvmlEventData_t * data, unsigned int timeoutms); nvmlReturn_t DECLDIR nvmlDeviceGetAttributes(nvmlDevice_t device, nvmlDeviceAttributes_t *attributes); +nvmlReturn_t DECLDIR nvmlComputeInstanceGetInfo(nvmlComputeInstance_t computeInstance, nvmlComputeInstanceInfo_t *info); +nvmlReturn_t DECLDIR nvmlDeviceGetComputeRunningProcesses(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_v1_t *infos); +nvmlReturn_t DECLDIR nvmlDeviceGetComputeRunningProcesses_v2(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_v2_t *infos); +nvmlReturn_t DECLDIR nvmlDeviceGetGraphicsRunningProcesses(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_v1_t *infos); +nvmlReturn_t DECLDIR nvmlDeviceGetGraphicsRunningProcesses_v2(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_v2_t *infos); +nvmlReturn_t DECLDIR nvmlDeviceGetMPSComputeRunningProcesses(nvmlDevice_t device, unsigned int 
*infoCount, nvmlProcessInfo_v1_t *infos); +nvmlReturn_t DECLDIR nvmlDeviceGetMPSComputeRunningProcesses_v2(nvmlDevice_t device, unsigned int *infoCount, nvmlProcessInfo_v2_t *infos); +nvmlReturn_t DECLDIR nvmlDeviceGetGpuInstancePossiblePlacements(nvmlDevice_t device, unsigned int profileId, nvmlGpuInstancePlacement_t *placements, unsigned int *count); +nvmlReturn_t DECLDIR nvmlVgpuInstanceGetLicenseInfo(nvmlVgpuInstance_t vgpuInstance, nvmlVgpuLicenseInfo_t *licenseInfo); + #endif // #ifdef NVML_NO_UNVERSIONED_FUNC_DEFS #if defined(NVML_NO_UNVERSIONED_FUNC_DEFS) // We don't define APIs to run new versions if this guard is present so there is // no need to undef #elif defined(__NVML_API_VERSION_INTERNAL) +#undef nvmlDeviceGetGraphicsRunningProcesses +#undef nvmlDeviceGetComputeRunningProcesses +#undef nvmlDeviceGetMPSComputeRunningProcesses #undef nvmlDeviceGetAttributes +#undef nvmlComputeInstanceGetInfo #undef nvmlEventSetWait #undef nvmlDeviceGetGridLicensableFeatures #undef nvmlDeviceRemoveGpu @@ -7594,6 +9674,12 @@ nvmlReturn_t DECLDIR nvmlDeviceGetAttributes(nvmlDevice_t device, nvmlDeviceAttr #undef nvmlDeviceGetHandleByIndex #undef nvmlDeviceGetHandleByPciBusId #undef nvmlInit +#undef nvmlBlacklistDeviceInfo_t +#undef nvmlGetBlacklistDeviceCount +#undef nvmlGetBlacklistDeviceInfoByIndex +#undef nvmlDeviceGetGpuInstancePossiblePlacements +#undef nvmlVgpuInstanceGetLicenseInfo + #endif #ifdef __cplusplus diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/return.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/return.go new file mode 100644 index 0000000..fdf1191 --- /dev/null +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/return.go @@ -0,0 +1,20 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nvml + +// nvml.ErrorString() +func ErrorString(Result Return) string { + return nvmlErrorString(Result) +} diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/system.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/system.go new file mode 100644 index 0000000..424f99b --- /dev/null +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/system.go @@ -0,0 +1,81 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package nvml + +// nvml.SystemGetDriverVersion() +func SystemGetDriverVersion() (string, Return) { + Version := make([]byte, SYSTEM_DRIVER_VERSION_BUFFER_SIZE) + ret := nvmlSystemGetDriverVersion(&Version[0], SYSTEM_DRIVER_VERSION_BUFFER_SIZE) + return string(Version[:clen(Version)]), ret +} + +// nvml.SystemGetNVMLVersion() +func SystemGetNVMLVersion() (string, Return) { + Version := make([]byte, SYSTEM_NVML_VERSION_BUFFER_SIZE) + ret := nvmlSystemGetNVMLVersion(&Version[0], SYSTEM_NVML_VERSION_BUFFER_SIZE) + return string(Version[:clen(Version)]), ret +} + +// nvml.SystemGetCudaDriverVersion() +func SystemGetCudaDriverVersion() (int, Return) { + var CudaDriverVersion int32 + ret := nvmlSystemGetCudaDriverVersion(&CudaDriverVersion) + return int(CudaDriverVersion), ret +} + +// nvml.SystemGetCudaDriverVersion_v2() +func SystemGetCudaDriverVersion_v2() (int, Return) { + var CudaDriverVersion int32 + ret := nvmlSystemGetCudaDriverVersion_v2(&CudaDriverVersion) + return int(CudaDriverVersion), ret +} + +// nvml.SystemGetProcessName() +func SystemGetProcessName(Pid int) (string, Return) { + Name := make([]byte, SYSTEM_PROCESS_NAME_BUFFER_SIZE) + ret := nvmlSystemGetProcessName(uint32(Pid), &Name[0], SYSTEM_PROCESS_NAME_BUFFER_SIZE) + return string(Name[:clen(Name)]), ret +} + +// nvml.SystemGetHicVersion() +func SystemGetHicVersion() ([]HwbcEntry, Return) { + var HwbcCount uint32 = 1 // Will be reduced upon returning + for { + HwbcEntries := make([]HwbcEntry, HwbcCount) + ret := nvmlSystemGetHicVersion(&HwbcCount, &HwbcEntries[0]) + if ret == SUCCESS { + return HwbcEntries[:HwbcCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + HwbcCount *= 2 + } +} + +// nvml.SystemGetTopologyGpuSet() +func SystemGetTopologyGpuSet(CpuNumber int) ([]Device, Return) { + var Count uint32 + ret := nvmlSystemGetTopologyGpuSet(uint32(CpuNumber), &Count, nil) + if ret != SUCCESS { + return nil, ret + } + if Count == 0 { + return []Device{}, ret + } + DeviceArray := make([]Device, Count) + ret = nvmlSystemGetTopologyGpuSet(uint32(CpuNumber), &Count, &DeviceArray[0]) + return DeviceArray, ret +} diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/types_gen.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/types_gen.go new file mode 100644 index 0000000..396886d --- /dev/null +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/types_gen.go @@ -0,0 +1,583 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs types.go + +package nvml + +import "unsafe" + +type Device struct { + Handle *_Ctype_struct_nvmlDevice_st +} + +type PciInfo struct { + BusIdLegacy [16]int8 + Domain uint32 + Bus uint32 + Device uint32 + PciDeviceId uint32 + PciSubSystemId uint32 + BusId [32]int8 +} + +type EccErrorCounts struct { + L1Cache uint64 + L2Cache uint64 + DeviceMemory uint64 + RegisterFile uint64 +} + +type Utilization struct { + Gpu uint32 + Memory uint32 +} + +type Memory struct { + Total uint64 + Free uint64 + Used uint64 +} + +type Memory_v2 struct { + Version uint32 + Total uint64 + Reserved uint64 + Free uint64 + Used uint64 +} + +type BAR1Memory struct { + Bar1Total uint64 + Bar1Free uint64 + Bar1Used uint64 +} + +type ProcessInfo_v1 struct { + Pid uint32 + UsedGpuMemory uint64 +} + +type ProcessInfo_v2 struct { + Pid uint32 + UsedGpuMemory uint64 + GpuInstanceId uint32 + ComputeInstanceId uint32 +} + +type ProcessInfo struct { + Pid uint32 + UsedGpuMemory uint64 + GpuInstanceId uint32 + ComputeInstanceId uint32 +} + +type DeviceAttributes struct { + MultiprocessorCount uint32 + SharedCopyEngineCount uint32 + SharedDecoderCount uint32 + SharedEncoderCount uint32 + SharedJpegCount uint32 + SharedOfaCount uint32 + GpuInstanceSliceCount uint32 + ComputeInstanceSliceCount uint32 + MemorySizeMB uint64 +} + +type RowRemapperHistogramValues struct { + Max uint32 + High uint32 + Partial uint32 + Low uint32 + None uint32 +} + +type NvLinkUtilizationControl struct { + Units uint32 + Pktfilter uint32 +} + +type BridgeChipInfo struct { + Type uint32 + FwVersion uint32 +} + +type BridgeChipHierarchy struct { + BridgeCount uint8 + BridgeChipInfo [128]BridgeChipInfo +} + +const sizeofValue = unsafe.Sizeof([8]byte{}) + +type Value [sizeofValue]byte + +type Sample struct { + TimeStamp uint64 + SampleValue [8]byte +} + +type ViolationTime struct { + ReferenceTime uint64 + ViolationTime uint64 +} + +type GpuThermalSettingsSensor struct { + Controller int32 + DefaultMinTemp int32 + DefaultMaxTemp int32 + CurrentTemp int32 + Target int32 +} + +type GpuThermalSettings struct { + Count uint32 + Sensor [3]GpuThermalSettingsSensor +} + +type ClkMonFaultInfo struct { + ClkApiDomain uint32 + ClkDomainFaultMask uint32 +} + +type ClkMonStatus struct { + BGlobalStatus uint32 + ClkMonListSize uint32 + ClkMonList [32]ClkMonFaultInfo +} + +type VgpuTypeId uint32 + +type VgpuInstance uint32 + +type VgpuInstanceUtilizationSample struct { + VgpuInstance uint32 + TimeStamp uint64 + SmUtil [8]byte + MemUtil [8]byte + EncUtil [8]byte + DecUtil [8]byte +} + +type VgpuProcessUtilizationSample struct { + VgpuInstance uint32 + Pid uint32 + ProcessName [64]int8 + TimeStamp uint64 + SmUtil uint32 + MemUtil uint32 + EncUtil uint32 + DecUtil uint32 +} + +type VgpuSchedulerParamsVgpuSchedDataWithARR struct { + AvgFactor uint32 + Timeslice uint32 +} + +type VgpuSchedulerParamsVgpuSchedData struct { + Timeslice uint32 +} + +const sizeofVgpuSchedulerParams = unsafe.Sizeof([8]byte{}) + +type VgpuSchedulerParams [sizeofVgpuSchedulerParams]byte + +type VgpuSchedulerLogEntry struct { + Timestamp uint64 + TimeRunTotal uint64 + TimeRun uint64 + SwRunlistId uint32 + TargetTimeSlice uint64 + CumulativePreemptionTime uint64 +} + +type VgpuSchedulerLog struct { + EngineId uint32 + SchedulerPolicy uint32 + IsEnabledARR uint32 + SchedulerParams [8]byte + EntriesCount uint32 + LogEntries [200]VgpuSchedulerLogEntry +} + +type VgpuSchedulerGetState struct { + SchedulerPolicy uint32 + IsEnabledARR uint32 + SchedulerParams [8]byte +} + +type 
VgpuSchedulerSetParamsVgpuSchedDataWithARR struct { + AvgFactor uint32 + Frequency uint32 +} + +type VgpuSchedulerSetParamsVgpuSchedData struct { + Timeslice uint32 +} + +const sizeofVgpuSchedulerSetParams = unsafe.Sizeof([8]byte{}) + +type VgpuSchedulerSetParams [sizeofVgpuSchedulerSetParams]byte + +type VgpuSchedulerSetState struct { + SchedulerPolicy uint32 + EnableARRMode uint32 + SchedulerParams [8]byte +} + +type VgpuSchedulerCapabilities struct { + SupportedSchedulers [3]uint32 + MaxTimeslice uint32 + MinTimeslice uint32 + IsArrModeSupported uint32 + MaxFrequencyForARR uint32 + MinFrequencyForARR uint32 + MaxAvgFactorForARR uint32 + MinAvgFactorForARR uint32 +} + +type VgpuLicenseExpiry struct { + Year uint32 + Month uint16 + Day uint16 + Hour uint16 + Min uint16 + Sec uint16 + Status uint8 + Pad_cgo_0 [1]byte +} + +type VgpuLicenseInfo struct { + IsLicensed uint8 + LicenseExpiry VgpuLicenseExpiry + CurrentState uint32 +} + +type ProcessUtilizationSample struct { + Pid uint32 + TimeStamp uint64 + SmUtil uint32 + MemUtil uint32 + EncUtil uint32 + DecUtil uint32 +} + +type GridLicenseExpiry struct { + Year uint32 + Month uint16 + Day uint16 + Hour uint16 + Min uint16 + Sec uint16 + Status uint8 + Pad_cgo_0 [1]byte +} + +type GridLicensableFeature struct { + FeatureCode uint32 + FeatureState uint32 + LicenseInfo [128]int8 + ProductName [128]int8 + FeatureEnabled uint32 + LicenseExpiry GridLicenseExpiry +} + +type GridLicensableFeatures struct { + IsGridLicenseSupported int32 + LicensableFeaturesCount uint32 + GridLicensableFeatures [3]GridLicensableFeature +} + +type DeviceArchitecture uint32 + +type BusType uint32 + +type FanControlPolicy uint32 + +type PowerSource uint32 + +type GpuDynamicPstatesInfoUtilization struct { + BIsPresent uint32 + Percentage uint32 + IncThreshold uint32 + DecThreshold uint32 +} + +type GpuDynamicPstatesInfo struct { + Flags uint32 + Utilization [8]GpuDynamicPstatesInfoUtilization +} + +type FieldValue struct { + FieldId uint32 + ScopeId uint32 + Timestamp int64 + LatencyUsec int64 + ValueType uint32 + NvmlReturn uint32 + Value [8]byte +} + +type Unit struct { + Handle *_Ctype_struct_nvmlUnit_st +} + +type HwbcEntry struct { + HwbcId uint32 + FirmwareVersion [32]int8 +} + +type LedState struct { + Cause [256]int8 + Color uint32 +} + +type UnitInfo struct { + Name [96]int8 + Id [96]int8 + Serial [96]int8 + FirmwareVersion [96]int8 +} + +type PSUInfo struct { + State [256]int8 + Current uint32 + Voltage uint32 + Power uint32 +} + +type UnitFanInfo struct { + Speed uint32 + State uint32 +} + +type UnitFanSpeeds struct { + Fans [24]UnitFanInfo + Count uint32 +} + +type EventSet struct { + Handle *_Ctype_struct_nvmlEventSet_st +} + +type EventData struct { + Device Device + EventType uint64 + EventData uint64 + GpuInstanceId uint32 + ComputeInstanceId uint32 +} + +type AccountingStats struct { + GpuUtilization uint32 + MemoryUtilization uint32 + MaxMemoryUsage uint64 + Time uint64 + StartTime uint64 + IsRunning uint32 + Reserved [5]uint32 +} + +type EncoderSessionInfo struct { + SessionId uint32 + Pid uint32 + VgpuInstance uint32 + CodecType uint32 + HResolution uint32 + VResolution uint32 + AverageFps uint32 + AverageLatency uint32 +} + +type FBCStats struct { + SessionsCount uint32 + AverageFPS uint32 + AverageLatency uint32 +} + +type FBCSessionInfo struct { + SessionId uint32 + Pid uint32 + VgpuInstance uint32 + DisplayOrdinal uint32 + SessionType uint32 + SessionFlags uint32 + HMaxResolution uint32 + VMaxResolution uint32 + HResolution uint32 + VResolution 
uint32 + AverageFPS uint32 + AverageLatency uint32 +} + +type GpuFabricState byte + +type GpuFabricInfo struct { + ClusterUuid [16]int8 + Status uint32 + PartitionId uint32 + State uint8 + Pad_cgo_0 [3]byte +} + +type AffinityScope uint32 + +type VgpuVersion struct { + MinVersion uint32 + MaxVersion uint32 +} + +type nvmlVgpuMetadata struct { + Version uint32 + Revision uint32 + GuestInfoState uint32 + GuestDriverVersion [80]int8 + HostDriverVersion [80]int8 + Reserved [6]uint32 + VgpuVirtualizationCaps uint32 + GuestVgpuVersion uint32 + OpaqueDataSize uint32 + OpaqueData [4]int8 +} + +type nvmlVgpuPgpuMetadata struct { + Version uint32 + Revision uint32 + HostDriverVersion [80]int8 + PgpuVirtualizationCaps uint32 + Reserved [5]uint32 + HostSupportedVgpuRange VgpuVersion + OpaqueDataSize uint32 + OpaqueData [4]int8 +} + +type VgpuPgpuCompatibility struct { + VgpuVmCompatibility uint32 + CompatibilityLimitCode uint32 +} + +type ExcludedDeviceInfo struct { + PciInfo PciInfo + Uuid [80]int8 +} + +type GpuInstancePlacement struct { + Start uint32 + Size uint32 +} + +type GpuInstanceProfileInfo struct { + Id uint32 + IsP2pSupported uint32 + SliceCount uint32 + InstanceCount uint32 + MultiprocessorCount uint32 + CopyEngineCount uint32 + DecoderCount uint32 + EncoderCount uint32 + JpegCount uint32 + OfaCount uint32 + MemorySizeMB uint64 +} + +type GpuInstanceProfileInfo_v2 struct { + Version uint32 + Id uint32 + IsP2pSupported uint32 + SliceCount uint32 + InstanceCount uint32 + MultiprocessorCount uint32 + CopyEngineCount uint32 + DecoderCount uint32 + EncoderCount uint32 + JpegCount uint32 + OfaCount uint32 + MemorySizeMB uint64 + Name [96]int8 +} + +type GpuInstanceInfo struct { + Device Device + Id uint32 + ProfileId uint32 + Placement GpuInstancePlacement +} + +type GpuInstance struct { + Handle *_Ctype_struct_nvmlGpuInstance_st +} + +type ComputeInstancePlacement struct { + Start uint32 + Size uint32 +} + +type ComputeInstanceProfileInfo struct { + Id uint32 + SliceCount uint32 + InstanceCount uint32 + MultiprocessorCount uint32 + SharedCopyEngineCount uint32 + SharedDecoderCount uint32 + SharedEncoderCount uint32 + SharedJpegCount uint32 + SharedOfaCount uint32 +} + +type ComputeInstanceProfileInfo_v2 struct { + Version uint32 + Id uint32 + SliceCount uint32 + InstanceCount uint32 + MultiprocessorCount uint32 + SharedCopyEngineCount uint32 + SharedDecoderCount uint32 + SharedEncoderCount uint32 + SharedJpegCount uint32 + SharedOfaCount uint32 + Name [96]int8 +} + +type ComputeInstanceInfo struct { + Device Device + GpuInstance GpuInstance + Id uint32 + ProfileId uint32 + Placement ComputeInstancePlacement +} + +type ComputeInstance struct { + Handle *_Ctype_struct_nvmlComputeInstance_st +} + +type GpmSample struct { + Handle *_Ctype_struct_nvmlGpmSample_st +} + +type GpmMetricMetricInfo struct { + ShortName *int8 + LongName *int8 + Unit *int8 +} + +type GpmMetric struct { + MetricId uint32 + NvmlReturn uint32 + Value float64 + MetricInfo GpmMetricMetricInfo +} + +type GpmMetricsGetType struct { + Version uint32 + NumMetrics uint32 + Sample1 GpmSample + Sample2 GpmSample + Metrics [98]GpmMetric +} + +type GpmSupport struct { + Version uint32 + IsSupportedDevice uint32 +} + +type NvLinkPowerThres struct { + LowPwrThreshold uint32 +} diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/unit.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/unit.go new file mode 100644 index 0000000..aba916a --- /dev/null +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/unit.go @@ -0,0 +1,113 @@ +// Copyright (c) 
2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nvml + +// nvml.UnitGetCount() +func UnitGetCount() (int, Return) { + var UnitCount uint32 + ret := nvmlUnitGetCount(&UnitCount) + return int(UnitCount), ret +} + +// nvml.UnitGetHandleByIndex() +func UnitGetHandleByIndex(Index int) (Unit, Return) { + var Unit Unit + ret := nvmlUnitGetHandleByIndex(uint32(Index), &Unit) + return Unit, ret +} + +// nvml.UnitGetUnitInfo() +func UnitGetUnitInfo(Unit Unit) (UnitInfo, Return) { + var Info UnitInfo + ret := nvmlUnitGetUnitInfo(Unit, &Info) + return Info, ret +} + +func (Unit Unit) GetUnitInfo() (UnitInfo, Return) { + return UnitGetUnitInfo(Unit) +} + +// nvml.UnitGetLedState() +func UnitGetLedState(Unit Unit) (LedState, Return) { + var State LedState + ret := nvmlUnitGetLedState(Unit, &State) + return State, ret +} + +func (Unit Unit) GetLedState() (LedState, Return) { + return UnitGetLedState(Unit) +} + +// nvml.UnitGetPsuInfo() +func UnitGetPsuInfo(Unit Unit) (PSUInfo, Return) { + var Psu PSUInfo + ret := nvmlUnitGetPsuInfo(Unit, &Psu) + return Psu, ret +} + +func (Unit Unit) GetPsuInfo() (PSUInfo, Return) { + return UnitGetPsuInfo(Unit) +} + +// nvml.UnitGetTemperature() +func UnitGetTemperature(Unit Unit, Type int) (uint32, Return) { + var Temp uint32 + ret := nvmlUnitGetTemperature(Unit, uint32(Type), &Temp) + return Temp, ret +} + +func (Unit Unit) GetTemperature(Type int) (uint32, Return) { + return UnitGetTemperature(Unit, Type) +} + +// nvml.UnitGetFanSpeedInfo() +func UnitGetFanSpeedInfo(Unit Unit) (UnitFanSpeeds, Return) { + var FanSpeeds UnitFanSpeeds + ret := nvmlUnitGetFanSpeedInfo(Unit, &FanSpeeds) + return FanSpeeds, ret +} + +func (Unit Unit) GetFanSpeedInfo() (UnitFanSpeeds, Return) { + return UnitGetFanSpeedInfo(Unit) +} + +// nvml.UnitGetDevices() +func UnitGetDevices(Unit Unit) ([]Device, Return) { + var DeviceCount uint32 = 1 // Will be reduced upon returning + for { + Devices := make([]Device, DeviceCount) + ret := nvmlUnitGetDevices(Unit, &DeviceCount, &Devices[0]) + if ret == SUCCESS { + return Devices[:DeviceCount], ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + DeviceCount *= 2 + } +} + +func (Unit Unit) GetDevices() ([]Device, Return) { + return UnitGetDevices(Unit) +} + +// nvml.UnitSetLedState() +func UnitSetLedState(Unit Unit, Color LedColor) Return { + return nvmlUnitSetLedState(Unit, Color) +} + +func (Unit Unit) SetLedState(Color LedColor) Return { + return UnitSetLedState(Unit, Color) +} diff --git a/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/vgpu.go b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/vgpu.go new file mode 100644 index 0000000..bbb93e3 --- /dev/null +++ b/vendor/github.com/NVIDIA/go-nvml/pkg/nvml/vgpu.go @@ -0,0 +1,480 @@ +// Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nvml + +import ( + "unsafe" +) + +// nvml.VgpuMetadata +type VgpuMetadata struct { + nvmlVgpuMetadata + OpaqueData []byte +} + +// nvml.VgpuPgpuMetadata +type VgpuPgpuMetadata struct { + nvmlVgpuPgpuMetadata + OpaqueData []byte +} + +// nvml.VgpuTypeGetClass() +func VgpuTypeGetClass(VgpuTypeId VgpuTypeId) (string, Return) { + var Size uint32 = DEVICE_NAME_BUFFER_SIZE + VgpuTypeClass := make([]byte, DEVICE_NAME_BUFFER_SIZE) + ret := nvmlVgpuTypeGetClass(VgpuTypeId, &VgpuTypeClass[0], &Size) + return string(VgpuTypeClass[:clen(VgpuTypeClass)]), ret +} + +func (VgpuTypeId VgpuTypeId) GetClass() (string, Return) { + return VgpuTypeGetClass(VgpuTypeId) +} + +// nvml.VgpuTypeGetName() +func VgpuTypeGetName(VgpuTypeId VgpuTypeId) (string, Return) { + var Size uint32 = DEVICE_NAME_BUFFER_SIZE + VgpuTypeName := make([]byte, DEVICE_NAME_BUFFER_SIZE) + ret := nvmlVgpuTypeGetName(VgpuTypeId, &VgpuTypeName[0], &Size) + return string(VgpuTypeName[:clen(VgpuTypeName)]), ret +} + +func (VgpuTypeId VgpuTypeId) GetName() (string, Return) { + return VgpuTypeGetName(VgpuTypeId) +} + +// nvml.VgpuTypeGetGpuInstanceProfileId() +func VgpuTypeGetGpuInstanceProfileId(VgpuTypeId VgpuTypeId) (uint32, Return) { + var Size uint32 + ret := nvmlVgpuTypeGetGpuInstanceProfileId(VgpuTypeId, &Size) + return Size, ret +} + +func (VgpuTypeId VgpuTypeId) GetGpuInstanceProfileId() (uint32, Return) { + return VgpuTypeGetGpuInstanceProfileId(VgpuTypeId) +} + +// nvml.VgpuTypeGetDeviceID() +func VgpuTypeGetDeviceID(VgpuTypeId VgpuTypeId) (uint64, uint64, Return) { + var DeviceID, SubsystemID uint64 + ret := nvmlVgpuTypeGetDeviceID(VgpuTypeId, &DeviceID, &SubsystemID) + return DeviceID, SubsystemID, ret +} + +func (VgpuTypeId VgpuTypeId) GetDeviceID() (uint64, uint64, Return) { + return VgpuTypeGetDeviceID(VgpuTypeId) +} + +// nvml.VgpuTypeGetFramebufferSize() +func VgpuTypeGetFramebufferSize(VgpuTypeId VgpuTypeId) (uint64, Return) { + var FbSize uint64 + ret := nvmlVgpuTypeGetFramebufferSize(VgpuTypeId, &FbSize) + return FbSize, ret +} + +func (VgpuTypeId VgpuTypeId) GetFramebufferSize() (uint64, Return) { + return VgpuTypeGetFramebufferSize(VgpuTypeId) +} + +// nvml.VgpuTypeGetNumDisplayHeads() +func VgpuTypeGetNumDisplayHeads(VgpuTypeId VgpuTypeId) (int, Return) { + var NumDisplayHeads uint32 + ret := nvmlVgpuTypeGetNumDisplayHeads(VgpuTypeId, &NumDisplayHeads) + return int(NumDisplayHeads), ret +} + +func (VgpuTypeId VgpuTypeId) GetNumDisplayHeads() (int, Return) { + return VgpuTypeGetNumDisplayHeads(VgpuTypeId) +} + +// nvml.VgpuTypeGetResolution() +func VgpuTypeGetResolution(VgpuTypeId VgpuTypeId, DisplayIndex int) (uint32, uint32, Return) { + var Xdim, Ydim uint32 + ret := nvmlVgpuTypeGetResolution(VgpuTypeId, uint32(DisplayIndex), &Xdim, &Ydim) + return Xdim, Ydim, ret +} + +func (VgpuTypeId VgpuTypeId) GetResolution(DisplayIndex int) (uint32, uint32, Return) { + return VgpuTypeGetResolution(VgpuTypeId, DisplayIndex) +} + +// nvml.VgpuTypeGetLicense() +func VgpuTypeGetLicense(VgpuTypeId VgpuTypeId) (string, Return) { + VgpuTypeLicenseString := make([]byte, GRID_LICENSE_BUFFER_SIZE) + ret := 
nvmlVgpuTypeGetLicense(VgpuTypeId, &VgpuTypeLicenseString[0], GRID_LICENSE_BUFFER_SIZE) + return string(VgpuTypeLicenseString[:clen(VgpuTypeLicenseString)]), ret +} + +func (VgpuTypeId VgpuTypeId) GetLicense() (string, Return) { + return VgpuTypeGetLicense(VgpuTypeId) +} + +// nvml.VgpuTypeGetFrameRateLimit() +func VgpuTypeGetFrameRateLimit(VgpuTypeId VgpuTypeId) (uint32, Return) { + var FrameRateLimit uint32 + ret := nvmlVgpuTypeGetFrameRateLimit(VgpuTypeId, &FrameRateLimit) + return FrameRateLimit, ret +} + +func (VgpuTypeId VgpuTypeId) GetFrameRateLimit() (uint32, Return) { + return VgpuTypeGetFrameRateLimit(VgpuTypeId) +} + +// nvml.VgpuTypeGetMaxInstances() +func VgpuTypeGetMaxInstances(Device Device, VgpuTypeId VgpuTypeId) (int, Return) { + var VgpuInstanceCount uint32 + ret := nvmlVgpuTypeGetMaxInstances(Device, VgpuTypeId, &VgpuInstanceCount) + return int(VgpuInstanceCount), ret +} + +func (Device Device) VgpuTypeGetMaxInstances(VgpuTypeId VgpuTypeId) (int, Return) { + return VgpuTypeGetMaxInstances(Device, VgpuTypeId) +} + +func (VgpuTypeId VgpuTypeId) GetMaxInstances(Device Device) (int, Return) { + return VgpuTypeGetMaxInstances(Device, VgpuTypeId) +} + +// nvml.VgpuTypeGetMaxInstancesPerVm() +func VgpuTypeGetMaxInstancesPerVm(VgpuTypeId VgpuTypeId) (int, Return) { + var VgpuInstanceCountPerVm uint32 + ret := nvmlVgpuTypeGetMaxInstancesPerVm(VgpuTypeId, &VgpuInstanceCountPerVm) + return int(VgpuInstanceCountPerVm), ret +} + +func (VgpuTypeId VgpuTypeId) GetMaxInstancesPerVm() (int, Return) { + return VgpuTypeGetMaxInstancesPerVm(VgpuTypeId) +} + +// nvml.VgpuInstanceGetVmID() +func VgpuInstanceGetVmID(VgpuInstance VgpuInstance) (string, VgpuVmIdType, Return) { + var VmIdType VgpuVmIdType + VmId := make([]byte, DEVICE_UUID_BUFFER_SIZE) + ret := nvmlVgpuInstanceGetVmID(VgpuInstance, &VmId[0], DEVICE_UUID_BUFFER_SIZE, &VmIdType) + return string(VmId[:clen(VmId)]), VmIdType, ret +} + +func (VgpuInstance VgpuInstance) GetVmID() (string, VgpuVmIdType, Return) { + return VgpuInstanceGetVmID(VgpuInstance) +} + +// nvml.VgpuInstanceGetUUID() +func VgpuInstanceGetUUID(VgpuInstance VgpuInstance) (string, Return) { + Uuid := make([]byte, DEVICE_UUID_BUFFER_SIZE) + ret := nvmlVgpuInstanceGetUUID(VgpuInstance, &Uuid[0], DEVICE_UUID_BUFFER_SIZE) + return string(Uuid[:clen(Uuid)]), ret +} + +func (VgpuInstance VgpuInstance) GetUUID() (string, Return) { + return VgpuInstanceGetUUID(VgpuInstance) +} + +// nvml.VgpuInstanceGetVmDriverVersion() +func VgpuInstanceGetVmDriverVersion(VgpuInstance VgpuInstance) (string, Return) { + Version := make([]byte, SYSTEM_DRIVER_VERSION_BUFFER_SIZE) + ret := nvmlVgpuInstanceGetVmDriverVersion(VgpuInstance, &Version[0], SYSTEM_DRIVER_VERSION_BUFFER_SIZE) + return string(Version[:clen(Version)]), ret +} + +func (VgpuInstance VgpuInstance) GetVmDriverVersion() (string, Return) { + return VgpuInstanceGetVmDriverVersion(VgpuInstance) +} + +// nvml.VgpuInstanceGetFbUsage() +func VgpuInstanceGetFbUsage(VgpuInstance VgpuInstance) (uint64, Return) { + var FbUsage uint64 + ret := nvmlVgpuInstanceGetFbUsage(VgpuInstance, &FbUsage) + return FbUsage, ret +} + +func (VgpuInstance VgpuInstance) GetFbUsage() (uint64, Return) { + return VgpuInstanceGetFbUsage(VgpuInstance) +} + +// nvml.VgpuInstanceGetLicenseInfo() +func VgpuInstanceGetLicenseInfo(VgpuInstance VgpuInstance) (VgpuLicenseInfo, Return) { + var LicenseInfo VgpuLicenseInfo + ret := nvmlVgpuInstanceGetLicenseInfo(VgpuInstance, &LicenseInfo) + return LicenseInfo, ret +} + +func (VgpuInstance VgpuInstance) 
GetLicenseInfo() (VgpuLicenseInfo, Return) { + return VgpuInstanceGetLicenseInfo(VgpuInstance) +} + +// nvml.VgpuInstanceGetLicenseStatus() +func VgpuInstanceGetLicenseStatus(VgpuInstance VgpuInstance) (int, Return) { + var Licensed uint32 + ret := nvmlVgpuInstanceGetLicenseStatus(VgpuInstance, &Licensed) + return int(Licensed), ret +} + +func (VgpuInstance VgpuInstance) GetLicenseStatus() (int, Return) { + return VgpuInstanceGetLicenseStatus(VgpuInstance) +} + +// nvml.VgpuInstanceGetType() +func VgpuInstanceGetType(VgpuInstance VgpuInstance) (VgpuTypeId, Return) { + var VgpuTypeId VgpuTypeId + ret := nvmlVgpuInstanceGetType(VgpuInstance, &VgpuTypeId) + return VgpuTypeId, ret +} + +func (VgpuInstance VgpuInstance) GetType() (VgpuTypeId, Return) { + return VgpuInstanceGetType(VgpuInstance) +} + +// nvml.VgpuInstanceGetFrameRateLimit() +func VgpuInstanceGetFrameRateLimit(VgpuInstance VgpuInstance) (uint32, Return) { + var FrameRateLimit uint32 + ret := nvmlVgpuInstanceGetFrameRateLimit(VgpuInstance, &FrameRateLimit) + return FrameRateLimit, ret +} + +func (VgpuInstance VgpuInstance) GetFrameRateLimit() (uint32, Return) { + return VgpuInstanceGetFrameRateLimit(VgpuInstance) +} + +// nvml.VgpuInstanceGetEccMode() +func VgpuInstanceGetEccMode(VgpuInstance VgpuInstance) (EnableState, Return) { + var EccMode EnableState + ret := nvmlVgpuInstanceGetEccMode(VgpuInstance, &EccMode) + return EccMode, ret +} + +func (VgpuInstance VgpuInstance) GetEccMode() (EnableState, Return) { + return VgpuInstanceGetEccMode(VgpuInstance) +} + +// nvml.VgpuInstanceGetEncoderCapacity() +func VgpuInstanceGetEncoderCapacity(VgpuInstance VgpuInstance) (int, Return) { + var EncoderCapacity uint32 + ret := nvmlVgpuInstanceGetEncoderCapacity(VgpuInstance, &EncoderCapacity) + return int(EncoderCapacity), ret +} + +func (VgpuInstance VgpuInstance) GetEncoderCapacity() (int, Return) { + return VgpuInstanceGetEncoderCapacity(VgpuInstance) +} + +// nvml.VgpuInstanceSetEncoderCapacity() +func VgpuInstanceSetEncoderCapacity(VgpuInstance VgpuInstance, EncoderCapacity int) Return { + return nvmlVgpuInstanceSetEncoderCapacity(VgpuInstance, uint32(EncoderCapacity)) +} + +func (VgpuInstance VgpuInstance) SetEncoderCapacity(EncoderCapacity int) Return { + return VgpuInstanceSetEncoderCapacity(VgpuInstance, EncoderCapacity) +} + +// nvml.VgpuInstanceGetEncoderStats() +func VgpuInstanceGetEncoderStats(VgpuInstance VgpuInstance) (int, uint32, uint32, Return) { + var SessionCount, AverageFps, AverageLatency uint32 + ret := nvmlVgpuInstanceGetEncoderStats(VgpuInstance, &SessionCount, &AverageFps, &AverageLatency) + return int(SessionCount), AverageFps, AverageLatency, ret +} + +func (VgpuInstance VgpuInstance) GetEncoderStats() (int, uint32, uint32, Return) { + return VgpuInstanceGetEncoderStats(VgpuInstance) +} + +// nvml.VgpuInstanceGetEncoderSessions() +func VgpuInstanceGetEncoderSessions(VgpuInstance VgpuInstance) (int, EncoderSessionInfo, Return) { + var SessionCount uint32 + var SessionInfo EncoderSessionInfo + ret := nvmlVgpuInstanceGetEncoderSessions(VgpuInstance, &SessionCount, &SessionInfo) + return int(SessionCount), SessionInfo, ret +} + +func (VgpuInstance VgpuInstance) GetEncoderSessions() (int, EncoderSessionInfo, Return) { + return VgpuInstanceGetEncoderSessions(VgpuInstance) +} + +// nvml.VgpuInstanceGetFBCStats() +func VgpuInstanceGetFBCStats(VgpuInstance VgpuInstance) (FBCStats, Return) { + var FbcStats FBCStats + ret := nvmlVgpuInstanceGetFBCStats(VgpuInstance, &FbcStats) + return FbcStats, ret +} + +func (VgpuInstance 
VgpuInstance) GetFBCStats() (FBCStats, Return) { + return VgpuInstanceGetFBCStats(VgpuInstance) +} + +// nvml.VgpuInstanceGetFBCSessions() +func VgpuInstanceGetFBCSessions(VgpuInstance VgpuInstance) (int, FBCSessionInfo, Return) { + var SessionCount uint32 + var SessionInfo FBCSessionInfo + ret := nvmlVgpuInstanceGetFBCSessions(VgpuInstance, &SessionCount, &SessionInfo) + return int(SessionCount), SessionInfo, ret +} + +func (VgpuInstance VgpuInstance) GetFBCSessions() (int, FBCSessionInfo, Return) { + return VgpuInstanceGetFBCSessions(VgpuInstance) +} + +// nvml.VgpuInstanceGetGpuInstanceId() +func VgpuInstanceGetGpuInstanceId(VgpuInstance VgpuInstance) (int, Return) { + var gpuInstanceId uint32 + ret := nvmlVgpuInstanceGetGpuInstanceId(VgpuInstance, &gpuInstanceId) + return int(gpuInstanceId), ret +} + +func (VgpuInstance VgpuInstance) GetGpuInstanceId() (int, Return) { + return VgpuInstanceGetGpuInstanceId(VgpuInstance) +} + +// nvml.VgpuInstanceGetGpuPciId() +func VgpuInstanceGetGpuPciId(VgpuInstance VgpuInstance) (string, Return) { + var Length uint32 = 1 // Will be reduced upon returning + for { + VgpuPciId := make([]byte, Length) + ret := nvmlVgpuInstanceGetGpuPciId(VgpuInstance, &VgpuPciId[0], &Length) + if ret == SUCCESS { + return string(VgpuPciId[:clen(VgpuPciId)]), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return "", ret + } + Length *= 2 + } +} + +func (VgpuInstance VgpuInstance) GetGpuPciId() (string, Return) { + return VgpuInstanceGetGpuPciId(VgpuInstance) +} + +// nvml.VgpuInstanceGetMetadata() +func VgpuInstanceGetMetadata(VgpuInstance VgpuInstance) (VgpuMetadata, Return) { + var VgpuMetadata VgpuMetadata + OpaqueDataSize := unsafe.Sizeof(VgpuMetadata.nvmlVgpuMetadata.OpaqueData) + VgpuMetadataSize := unsafe.Sizeof(VgpuMetadata.nvmlVgpuMetadata) - OpaqueDataSize + for { + BufferSize := uint32(VgpuMetadataSize + OpaqueDataSize) + Buffer := make([]byte, BufferSize) + nvmlVgpuMetadataPtr := (*nvmlVgpuMetadata)(unsafe.Pointer(&Buffer[0])) + ret := nvmlVgpuInstanceGetMetadata(VgpuInstance, nvmlVgpuMetadataPtr, &BufferSize) + if ret == SUCCESS { + VgpuMetadata.nvmlVgpuMetadata = *nvmlVgpuMetadataPtr + VgpuMetadata.OpaqueData = Buffer[VgpuMetadataSize:BufferSize] + return VgpuMetadata, ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return VgpuMetadata, ret + } + OpaqueDataSize = 2 * OpaqueDataSize + } +} + +func (VgpuInstance VgpuInstance) GetMetadata() (VgpuMetadata, Return) { + return VgpuInstanceGetMetadata(VgpuInstance) +} + +// nvml.VgpuInstanceGetAccountingMode() +func VgpuInstanceGetAccountingMode(VgpuInstance VgpuInstance) (EnableState, Return) { + var Mode EnableState + ret := nvmlVgpuInstanceGetAccountingMode(VgpuInstance, &Mode) + return Mode, ret +} + +func (VgpuInstance VgpuInstance) GetAccountingMode() (EnableState, Return) { + return VgpuInstanceGetAccountingMode(VgpuInstance) +} + +// nvml.VgpuInstanceGetAccountingPids() +func VgpuInstanceGetAccountingPids(VgpuInstance VgpuInstance) ([]int, Return) { + var Count uint32 = 1 // Will be reduced upon returning + for { + Pids := make([]uint32, Count) + ret := nvmlVgpuInstanceGetAccountingPids(VgpuInstance, &Count, &Pids[0]) + if ret == SUCCESS { + return uint32SliceToIntSlice(Pids[:Count]), ret + } + if ret != ERROR_INSUFFICIENT_SIZE { + return nil, ret + } + Count *= 2 + } +} + +func (VgpuInstance VgpuInstance) GetAccountingPids() ([]int, Return) { + return VgpuInstanceGetAccountingPids(VgpuInstance) +} + +// nvml.VgpuInstanceGetAccountingStats() +func VgpuInstanceGetAccountingStats(VgpuInstance VgpuInstance, 
Pid int) (AccountingStats, Return) { + var Stats AccountingStats + ret := nvmlVgpuInstanceGetAccountingStats(VgpuInstance, uint32(Pid), &Stats) + return Stats, ret +} + +func (VgpuInstance VgpuInstance) GetAccountingStats(Pid int) (AccountingStats, Return) { + return VgpuInstanceGetAccountingStats(VgpuInstance, Pid) +} + +// nvml.GetVgpuCompatibility() +func GetVgpuCompatibility(nvmlVgpuMetadata *nvmlVgpuMetadata, PgpuMetadata *nvmlVgpuPgpuMetadata) (VgpuPgpuCompatibility, Return) { + var CompatibilityInfo VgpuPgpuCompatibility + ret := nvmlGetVgpuCompatibility(nvmlVgpuMetadata, PgpuMetadata, &CompatibilityInfo) + return CompatibilityInfo, ret +} + +// nvml.GetVgpuVersion() +func GetVgpuVersion() (VgpuVersion, VgpuVersion, Return) { + var Supported, Current VgpuVersion + ret := nvmlGetVgpuVersion(&Supported, &Current) + return Supported, Current, ret +} + +// nvml.SetVgpuVersion() +func SetVgpuVersion(VgpuVersion *VgpuVersion) Return { + return nvmlSetVgpuVersion(VgpuVersion) +} + +// nvml.VgpuInstanceClearAccountingPids() +func VgpuInstanceClearAccountingPids(VgpuInstance VgpuInstance) Return { + return nvmlVgpuInstanceClearAccountingPids(VgpuInstance) +} + +func (VgpuInstance VgpuInstance) ClearAccountingPids() Return { + return VgpuInstanceClearAccountingPids(VgpuInstance) +} + +// nvml.VgpuInstanceGetMdevUUID() +func VgpuInstanceGetMdevUUID(VgpuInstance VgpuInstance) (string, Return) { + MdevUuid := make([]byte, DEVICE_UUID_BUFFER_SIZE) + ret := nvmlVgpuInstanceGetMdevUUID(VgpuInstance, &MdevUuid[0], DEVICE_UUID_BUFFER_SIZE) + return string(MdevUuid[:clen(MdevUuid)]), ret +} + +func (VgpuInstance VgpuInstance) GetMdevUUID() (string, Return) { + return VgpuInstanceGetMdevUUID(VgpuInstance) +} + +// nvml.VgpuTypeGetCapabilities() +func VgpuTypeGetCapabilities(VgpuTypeId VgpuTypeId, Capability VgpuCapability) (bool, Return) { + var CapResult uint32 + ret := nvmlVgpuTypeGetCapabilities(VgpuTypeId, Capability, &CapResult) + return (CapResult != 0), ret +} + +func (VgpuTypeId VgpuTypeId) GetCapabilities(Capability VgpuCapability) (bool, Return) { + return VgpuTypeGetCapabilities(VgpuTypeId, Capability) +} + +// nvml.GetVgpuDriverCapabilities() +func GetVgpuDriverCapabilities(Capability VgpuDriverCapability) (bool, Return) { + var CapResult uint32 + ret := nvmlGetVgpuDriverCapabilities(Capability, &CapResult) + return (CapResult != 0), ret +} diff --git a/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/bindings.go b/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/bindings.go deleted file mode 100644 index 6898918..0000000 --- a/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/bindings.go +++ /dev/null @@ -1,848 +0,0 @@ -/* - * Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
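// UnitGetDevices, VgpuInstanceGetGpuPciId, VgpuInstanceGetAccountingPids and
// VgpuInstanceGetMetadata above all share one allocation idiom: start with a
// small buffer, retry on ERROR_INSUFFICIENT_SIZE with a doubled size, and let
// the driver write back the actual element count on success. Factored out as a
// generic helper inside package nvml it would look roughly like this
// (illustrative sketch only; "query" stands for any NVML call that takes a
// count-and-buffer pair, and the helper is not part of the vendored sources):
func queryWithGrowingBuffer[T any](query func(buf []T, count *uint32) Return) ([]T, Return) {
	var count uint32 = 1 // reduced (or raised) by the driver on each call
	for {
		buf := make([]T, count)
		ret := query(buf, &count)
		if ret == SUCCESS {
			return buf[:count], ret // count now holds the number of valid entries
		}
		if ret != ERROR_INSUFFICIENT_SIZE {
			return nil, ret
		}
		count *= 2 // buffer was too small: double it and try again
	}
}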
- */ - -package nvml - -/* -#cgo linux LDFLAGS: -ldl -Wl,--unresolved-symbols=ignore-in-object-files -#cgo darwin LDFLAGS: -ldl -Wl,-undefined,dynamic_lookup -#cgo windows LDFLAGS: -LC:/Program\ Files/NVIDIA\ Corporation/NVSMI -lnvml -#include "nvml.h" - -#undef nvmlEventSetWait -nvmlReturn_t DECLDIR nvmlEventSetWait(nvmlEventSet_t set, nvmlEventData_t * data, unsigned int timeoutms); -nvmlReturn_t DECLDIR nvmlEventSetWait_v2(nvmlEventSet_t set, nvmlEventData_t * data, unsigned int timeoutms); -*/ -import "C" - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "sort" - "strconv" - "strings" -) - -const ( - szDriver = C.NVML_SYSTEM_DRIVER_VERSION_BUFFER_SIZE - szName = C.NVML_DEVICE_NAME_BUFFER_SIZE - szUUID = C.NVML_DEVICE_UUID_V2_BUFFER_SIZE - szProcs = 32 - szProcName = 64 - - XidCriticalError = C.nvmlEventTypeXidCriticalError -) - -type handle struct{ dev C.nvmlDevice_t } -type EventSet struct{ set C.nvmlEventSet_t } -type Event struct { - UUID *string - GpuInstanceId *uint - ComputeInstanceId *uint - Etype uint64 - Edata uint64 -} - -func uintPtr(c C.uint) *uint { - i := uint(c) - return &i -} - -func uint64Ptr(c C.ulonglong) *uint64 { - i := uint64(c) - return &i -} - -func stringPtr(c *C.char) *string { - s := C.GoString(c) - return &s -} - -func errorString(ret C.nvmlReturn_t) error { - if ret == C.NVML_SUCCESS { - return nil - } - err := C.GoString(C.nvmlErrorString(ret)) - return fmt.Errorf("nvml: %v", err) -} - -func init_() error { - r := dl.nvmlInit() - if r == C.NVML_ERROR_LIBRARY_NOT_FOUND { - return errors.New("could not load NVML library") - } - return errorString(r) -} - -func NewEventSet() EventSet { - var set C.nvmlEventSet_t - C.nvmlEventSetCreate(&set) - - return EventSet{set} -} - -func RegisterEvent(es EventSet, event int) error { - n, err := deviceGetCount() - if err != nil { - return err - } - - var i uint - for i = 0; i < n; i++ { - h, err := deviceGetHandleByIndex(i) - if err != nil { - return err - } - - r := C.nvmlDeviceRegisterEvents(h.dev, C.ulonglong(event), es.set) - if r != C.NVML_SUCCESS { - return errorString(r) - } - } - - return nil -} - -func RegisterEventForDevice(es EventSet, event int, uuid string) error { - n, err := deviceGetCount() - if err != nil { - return err - } - - var i uint - for i = 0; i < n; i++ { - h, err := deviceGetHandleByIndex(i) - if err != nil { - return err - } - - duuid, err := h.deviceGetUUID() - if err != nil { - return err - } - - if *duuid != uuid { - continue - } - - r := C.nvmlDeviceRegisterEvents(h.dev, C.ulonglong(event), es.set) - if r != C.NVML_SUCCESS { - return errorString(r) - } - - return nil - } - - return fmt.Errorf("nvml: device not found") -} - -func DeleteEventSet(es EventSet) { - C.nvmlEventSetFree(es.set) -} - -func WaitForEvent(es EventSet, timeout uint) (Event, error) { - var data C.nvmlEventData_t - - r := dl.lookupSymbol("nvmlEventSetWait_v2") - if r == C.NVML_SUCCESS { - r = C.nvmlEventSetWait_v2(es.set, &data, C.uint(timeout)) - } else { - r = C.nvmlEventSetWait(es.set, &data, C.uint(timeout)) - data.gpuInstanceId = 0xFFFFFFFF - data.computeInstanceId = 0xFFFFFFFF - } - if r != C.NVML_SUCCESS { - return Event{}, errorString(r) - } - - uuid, _ := handle{data.device}.deviceGetUUID() - - return Event{ - UUID: uuid, - Etype: uint64(data.eventType), - Edata: uint64(data.eventData), - GpuInstanceId: uintPtr(data.gpuInstanceId), - ComputeInstanceId: uintPtr(data.computeInstanceId), - }, nil -} - -func shutdown() error { - return errorString(dl.nvmlShutdown()) -} - -func systemGetCudaDriverVersion() (*uint, 
*uint, error) { - var v C.int - - r := C.nvmlSystemGetCudaDriverVersion_v2(&v) - if r != C.NVML_SUCCESS { - return nil, nil, errorString(r) - } - - major := uint(v / 1000) - minor := uint(v % 1000 / 10) - - return &major, &minor, errorString(r) -} - -func systemGetDriverVersion() (string, error) { - var driver [szDriver]C.char - - r := C.nvmlSystemGetDriverVersion(&driver[0], szDriver) - return C.GoString(&driver[0]), errorString(r) -} - -func systemGetProcessName(pid uint) (string, error) { - var proc [szProcName]C.char - - r := C.nvmlSystemGetProcessName(C.uint(pid), &proc[0], szProcName) - return C.GoString(&proc[0]), errorString(r) -} - -func deviceGetCount() (uint, error) { - var n C.uint - - r := C.nvmlDeviceGetCount(&n) - return uint(n), errorString(r) -} - -func deviceGetHandleByIndex(idx uint) (handle, error) { - var dev C.nvmlDevice_t - - r := C.nvmlDeviceGetHandleByIndex(C.uint(idx), &dev) - return handle{dev}, errorString(r) -} - -func deviceGetHandleByUUID(uuid string) (handle, error) { - var dev C.nvmlDevice_t - - r := C.nvmlDeviceGetHandleByUUID(C.CString(uuid), &dev) - return handle{dev}, errorString(r) -} - -func deviceGetTopologyCommonAncestor(h1, h2 handle) (*uint, error) { - r := dl.lookupSymbol("nvmlDeviceGetTopologyCommonAncestor") - if r == C.NVML_ERROR_FUNCTION_NOT_FOUND { - return nil, nil - } - - var level C.nvmlGpuTopologyLevel_t - r = C.nvmlDeviceGetTopologyCommonAncestor(h1.dev, h2.dev, &level) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil - } - - return uintPtr(C.uint(level)), errorString(r) -} - -func (h handle) deviceGetCudaComputeCapability() (*int, *int, error) { - var major, minor C.int - - r := C.nvmlDeviceGetCudaComputeCapability(h.dev, &major, &minor) - if r != C.NVML_SUCCESS { - return nil, nil, errorString(r) - } - - intMajor := int(major) - intMinor := int(minor) - - return &intMajor, &intMinor, errorString(r) -} - -func (h handle) deviceGetName() (*string, error) { - var name [szName]C.char - - r := C.nvmlDeviceGetName(h.dev, &name[0], szName) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil - } - return stringPtr(&name[0]), errorString(r) -} - -func (h handle) deviceGetIndex() (*uint, error) { - var index C.uint - r := C.nvmlDeviceGetIndex(h.dev, &index) - if r != C.NVML_SUCCESS { - return nil, errorString(r) - } - return uintPtr(index), nil -} - -func (h handle) deviceGetUUID() (*string, error) { - var uuid [szUUID]C.char - - r := C.nvmlDeviceGetUUID(h.dev, &uuid[0], szUUID) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil - } - return stringPtr(&uuid[0]), errorString(r) -} - -func (h handle) deviceGetPciInfo() (*string, error) { - var pci C.nvmlPciInfo_t - - r := C.nvmlDeviceGetPciInfo(h.dev, &pci) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil - } - return stringPtr(&pci.busId[0]), errorString(r) -} - -func (h handle) deviceGetMinorNumber() (*uint, error) { - var minor C.uint - - r := C.nvmlDeviceGetMinorNumber(h.dev, &minor) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil - } - return uintPtr(minor), errorString(r) -} - -func (h handle) deviceGetBAR1MemoryInfo() (*uint64, *uint64, error) { - var bar1 C.nvmlBAR1Memory_t - - r := C.nvmlDeviceGetBAR1MemoryInfo(h.dev, &bar1) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil, nil - } - return uint64Ptr(bar1.bar1Total), uint64Ptr(bar1.bar1Used), errorString(r) -} - -func (h handle) deviceGetNvLinkState(link uint) (*uint, error) { - var isActive C.nvmlEnableState_t - - r := C.nvmlDeviceGetNvLinkState(h.dev, C.uint(link), &isActive) - if r == 
C.NVML_ERROR_NOT_SUPPORTED || r == C.NVML_ERROR_INVALID_ARGUMENT { - return nil, nil - } - - return uintPtr(C.uint(isActive)), errorString(r) -} - -func (h handle) deviceGetNvLinkRemotePciInfo(link uint) (*string, error) { - var pci C.nvmlPciInfo_t - - r := C.nvmlDeviceGetNvLinkRemotePciInfo(h.dev, C.uint(link), &pci) - if r == C.NVML_ERROR_NOT_SUPPORTED || r == C.NVML_ERROR_INVALID_ARGUMENT { - return nil, nil - } - - return stringPtr(&pci.busId[0]), errorString(r) -} - -func (h handle) deviceGetAllNvLinkRemotePciInfo() ([]*string, error) { - busIds := []*string{} - - for i := uint(0); i < C.NVML_NVLINK_MAX_LINKS; i++ { - state, err := h.deviceGetNvLinkState(i) - if err != nil { - return nil, err - } - - if state == nil { - continue - } - - if *state == C.NVML_FEATURE_ENABLED { - pci, err := h.deviceGetNvLinkRemotePciInfo(i) - if err != nil { - return nil, err - } - - if pci == nil { - continue - } - - busIds = append(busIds, pci) - } - } - - return busIds, nil -} - -func (h handle) deviceGetPowerManagementLimit() (*uint, error) { - var power C.uint - - r := C.nvmlDeviceGetPowerManagementLimit(h.dev, &power) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil - } - return uintPtr(power), errorString(r) -} - -func (h handle) deviceGetMaxClockInfo() (*uint, *uint, error) { - var sm, mem C.uint - - r := C.nvmlDeviceGetMaxClockInfo(h.dev, C.NVML_CLOCK_SM, &sm) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil, nil - } - if r == C.NVML_SUCCESS { - r = C.nvmlDeviceGetMaxClockInfo(h.dev, C.NVML_CLOCK_MEM, &mem) - } - return uintPtr(sm), uintPtr(mem), errorString(r) -} - -func (h handle) deviceGetMaxPcieLinkGeneration() (*uint, error) { - var link C.uint - - r := C.nvmlDeviceGetMaxPcieLinkGeneration(h.dev, &link) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil - } - return uintPtr(link), errorString(r) -} - -func (h handle) deviceGetMaxPcieLinkWidth() (*uint, error) { - var width C.uint - - r := C.nvmlDeviceGetMaxPcieLinkWidth(h.dev, &width) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil - } - return uintPtr(width), errorString(r) -} - -func (h handle) deviceGetPowerUsage() (*uint, error) { - var power C.uint - - r := C.nvmlDeviceGetPowerUsage(h.dev, &power) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil - } - return uintPtr(power), errorString(r) -} - -func (h handle) deviceGetFanSpeed() (*uint, error) { - var speed C.uint - - r := C.nvmlDeviceGetFanSpeed(h.dev, &speed) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil - } - return uintPtr(speed), errorString(r) -} - -func (h handle) deviceGetTemperature() (*uint, error) { - var temp C.uint - - r := C.nvmlDeviceGetTemperature(h.dev, C.NVML_TEMPERATURE_GPU, &temp) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil - } - return uintPtr(temp), errorString(r) -} - -func (h handle) deviceGetUtilizationRates() (*uint, *uint, error) { - var usage C.nvmlUtilization_t - - r := C.nvmlDeviceGetUtilizationRates(h.dev, &usage) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil, nil - } - return uintPtr(usage.gpu), uintPtr(usage.memory), errorString(r) -} - -func (h handle) deviceGetEncoderUtilization() (*uint, error) { - var usage, sampling C.uint - - r := C.nvmlDeviceGetEncoderUtilization(h.dev, &usage, &sampling) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil - } - return uintPtr(usage), errorString(r) -} - -func (h handle) deviceGetDecoderUtilization() (*uint, error) { - var usage, sampling C.uint - - r := C.nvmlDeviceGetDecoderUtilization(h.dev, &usage, &sampling) - if r == 
C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil - } - return uintPtr(usage), errorString(r) -} - -func (h handle) deviceGetMemoryInfo() (totalMem *uint64, devMem DeviceMemory, err error) { - var mem C.nvmlMemory_t - - r := C.nvmlDeviceGetMemoryInfo(h.dev, &mem) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return - } - - err = errorString(r) - if r != C.NVML_SUCCESS { - return - } - - totalMem = uint64Ptr(mem.total) - if totalMem != nil { - *totalMem /= 1024 * 1024 // MiB - } - - devMem = DeviceMemory{ - Used: uint64Ptr(mem.used), - Free: uint64Ptr(mem.free), - } - - if devMem.Used != nil { - *devMem.Used /= 1024 * 1024 // MiB - } - - if devMem.Free != nil { - *devMem.Free /= 1024 * 1024 // MiB - } - return -} - -func (h handle) deviceGetClockInfo() (*uint, *uint, error) { - var sm, mem C.uint - - r := C.nvmlDeviceGetClockInfo(h.dev, C.NVML_CLOCK_SM, &sm) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil, nil - } - if r == C.NVML_SUCCESS { - r = C.nvmlDeviceGetClockInfo(h.dev, C.NVML_CLOCK_MEM, &mem) - } - return uintPtr(sm), uintPtr(mem), errorString(r) -} - -func (h handle) deviceGetMemoryErrorCounter() (*uint64, *uint64, *uint64, error) { - var l1, l2, mem C.ulonglong - - r := C.nvmlDeviceGetMemoryErrorCounter(h.dev, C.NVML_MEMORY_ERROR_TYPE_UNCORRECTED, - C.NVML_VOLATILE_ECC, C.NVML_MEMORY_LOCATION_L1_CACHE, &l1) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil, nil, nil - } - if r == C.NVML_SUCCESS { - r = C.nvmlDeviceGetMemoryErrorCounter(h.dev, C.NVML_MEMORY_ERROR_TYPE_UNCORRECTED, - C.NVML_VOLATILE_ECC, C.NVML_MEMORY_LOCATION_L2_CACHE, &l2) - } - if r == C.NVML_SUCCESS { - r = C.nvmlDeviceGetMemoryErrorCounter(h.dev, C.NVML_MEMORY_ERROR_TYPE_UNCORRECTED, - C.NVML_VOLATILE_ECC, C.NVML_MEMORY_LOCATION_DEVICE_MEMORY, &mem) - } - return uint64Ptr(l1), uint64Ptr(l2), uint64Ptr(mem), errorString(r) -} - -func (h handle) deviceGetPcieThroughput() (*uint, *uint, error) { - var rx, tx C.uint - - r := C.nvmlDeviceGetPcieThroughput(h.dev, C.NVML_PCIE_UTIL_RX_BYTES, &rx) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil, nil - } - if r == C.NVML_SUCCESS { - r = C.nvmlDeviceGetPcieThroughput(h.dev, C.NVML_PCIE_UTIL_TX_BYTES, &tx) - } - return uintPtr(rx), uintPtr(tx), errorString(r) -} - -func (h handle) deviceGetComputeRunningProcesses() ([]uint, []uint64, error) { - var procs [szProcs]C.nvmlProcessInfo_t - var count = C.uint(szProcs) - - r := C.nvmlDeviceGetComputeRunningProcesses(h.dev, &count, &procs[0]) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil, nil - } - n := int(count) - pids := make([]uint, n) - mems := make([]uint64, n) - for i := 0; i < n; i++ { - pids[i] = uint(procs[i].pid) - mems[i] = uint64(procs[i].usedGpuMemory) - } - return pids, mems, errorString(r) -} - -func (h handle) deviceGetGraphicsRunningProcesses() ([]uint, []uint64, error) { - var procs [szProcs]C.nvmlProcessInfo_t - var count = C.uint(szProcs) - - r := C.nvmlDeviceGetGraphicsRunningProcesses(h.dev, &count, &procs[0]) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return nil, nil, nil - } - n := int(count) - pids := make([]uint, n) - mems := make([]uint64, n) - for i := 0; i < n; i++ { - pids[i] = uint(procs[i].pid) - mems[i] = uint64(procs[i].usedGpuMemory) - } - return pids, mems, errorString(r) -} - -func (h handle) deviceGetAllRunningProcesses() ([]ProcessInfo, error) { - cPids, cpMems, err := h.deviceGetComputeRunningProcesses() - if err != nil { - return nil, err - } - - gPids, gpMems, err := h.deviceGetGraphicsRunningProcesses() - if err != nil { - return nil, err - } - - allPids := 
make(map[uint]ProcessInfo) - - for i, pid := range cPids { - name, err := processName(pid) - if err != nil { - return nil, err - } - allPids[pid] = ProcessInfo{ - PID: pid, - Name: name, - MemoryUsed: cpMems[i] / (1024 * 1024), // MiB - Type: Compute, - } - - } - - for i, pid := range gPids { - pInfo, exists := allPids[pid] - if exists { - pInfo.Type = ComputeAndGraphics - allPids[pid] = pInfo - } else { - name, err := processName(pid) - if err != nil { - return nil, err - } - allPids[pid] = ProcessInfo{ - PID: pid, - Name: name, - MemoryUsed: gpMems[i] / (1024 * 1024), // MiB - Type: Graphics, - } - } - } - - var processInfo []ProcessInfo - for _, v := range allPids { - processInfo = append(processInfo, v) - } - sort.Slice(processInfo, func(i, j int) bool { - return processInfo[i].PID < processInfo[j].PID - }) - - return processInfo, nil -} - -func (h handle) getClocksThrottleReasons() (reason ThrottleReason, err error) { - var clocksThrottleReasons C.ulonglong - - r := C.nvmlDeviceGetCurrentClocksThrottleReasons(h.dev, &clocksThrottleReasons) - - if r == C.NVML_ERROR_NOT_SUPPORTED { - return ThrottleReasonUnknown, nil - } - - if r != C.NVML_SUCCESS { - return ThrottleReasonUnknown, errorString(r) - } - - switch clocksThrottleReasons { - case C.nvmlClocksThrottleReasonGpuIdle: - reason = ThrottleReasonGpuIdle - case C.nvmlClocksThrottleReasonApplicationsClocksSetting: - reason = ThrottleReasonApplicationsClocksSetting - case C.nvmlClocksThrottleReasonSwPowerCap: - reason = ThrottleReasonSwPowerCap - case C.nvmlClocksThrottleReasonHwSlowdown: - reason = ThrottleReasonHwSlowdown - case C.nvmlClocksThrottleReasonSyncBoost: - reason = ThrottleReasonSyncBoost - case C.nvmlClocksThrottleReasonSwThermalSlowdown: - reason = ThrottleReasonSwThermalSlowdown - case C.nvmlClocksThrottleReasonHwThermalSlowdown: - reason = ThrottleReasonHwThermalSlowdown - case C.nvmlClocksThrottleReasonHwPowerBrakeSlowdown: - reason = ThrottleReasonHwPowerBrakeSlowdown - case C.nvmlClocksThrottleReasonDisplayClockSetting: - reason = ThrottleReasonDisplayClockSetting - case C.nvmlClocksThrottleReasonNone: - reason = ThrottleReasonNone - } - return -} - -func (h handle) getPerformanceState() (PerfState, error) { - var pstate C.nvmlPstates_t - - r := C.nvmlDeviceGetPerformanceState(h.dev, &pstate) - - if r == C.NVML_ERROR_NOT_SUPPORTED { - return PerfStateUnknown, nil - } - - if r != C.NVML_SUCCESS { - return PerfStateUnknown, errorString(r) - } - return PerfState(pstate), nil -} - -func processName(pid uint) (string, error) { - f := `/proc/` + strconv.FormatUint(uint64(pid), 10) + `/comm` - d, err := ioutil.ReadFile(f) - - if err != nil { - // TOCTOU: process terminated - if os.IsNotExist(err) { - return "", nil - } - return "", err - } - return strings.TrimSuffix(string(d), "\n"), err -} - -func (h handle) getAccountingInfo() (accountingInfo Accounting, err error) { - var mode C.nvmlEnableState_t - var buffer C.uint - - r := C.nvmlDeviceGetAccountingMode(h.dev, &mode) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return - } - - if r != C.NVML_SUCCESS { - return accountingInfo, errorString(r) - } - - r = C.nvmlDeviceGetAccountingBufferSize(h.dev, &buffer) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return - } - - if r != C.NVML_SUCCESS { - return accountingInfo, errorString(r) - } - - accountingInfo = Accounting{ - Mode: ModeState(mode), - BufferSize: uintPtr(buffer), - } - return -} - -func (h handle) getDisplayInfo() (display Display, err error) { - var mode, isActive C.nvmlEnableState_t - - r := 
C.nvmlDeviceGetDisplayActive(h.dev, &mode) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return - } - - if r != C.NVML_SUCCESS { - return display, errorString(r) - } - - r = C.nvmlDeviceGetDisplayMode(h.dev, &isActive) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return - } - if r != C.NVML_SUCCESS { - return display, errorString(r) - } - display = Display{ - Mode: ModeState(mode), - Active: ModeState(isActive), - } - return -} - -func (h handle) getPeristenceMode() (state ModeState, err error) { - var mode C.nvmlEnableState_t - - r := C.nvmlDeviceGetPersistenceMode(h.dev, &mode) - if r == C.NVML_ERROR_NOT_SUPPORTED { - return - } - return ModeState(mode), errorString(r) -} - -func (h *handle) isMigEnabled() (bool, error) { - ret := dl.lookupSymbol("nvmlDeviceGetMigMode") - if ret != C.NVML_SUCCESS { - return false, nil - } - - var cm, pm C.uint - ret = C.nvmlDeviceGetMigMode(h.dev, &cm, &pm) - if ret == C.NVML_ERROR_NOT_SUPPORTED { - return false, nil - } - if ret != C.NVML_SUCCESS { - return false, errorString(ret) - } - - return (cm == C.NVML_DEVICE_MIG_ENABLE) && (cm == pm), nil -} - -func (h *handle) getMigDevices() ([]handle, error) { - ret := dl.lookupSymbol("nvmlDeviceGetMaxMigDeviceCount") - if ret != C.NVML_SUCCESS { - return nil, errorString(ret) - } - - var c C.uint - ret = C.nvmlDeviceGetMaxMigDeviceCount(h.dev, &c) - if ret != C.NVML_SUCCESS { - return nil, errorString(ret) - } - - ret = dl.lookupSymbol("nvmlDeviceGetMigDeviceHandleByIndex") - if ret != C.NVML_SUCCESS { - return nil, errorString(ret) - } - - var handles []handle - for i := 0; i < int(c); i++ { - var mig C.nvmlDevice_t - ret := C.nvmlDeviceGetMigDeviceHandleByIndex(h.dev, C.uint(i), &mig) - if ret == C.NVML_ERROR_NOT_FOUND { - continue - } - if ret != C.NVML_SUCCESS { - return nil, errorString(ret) - } - - handles = append(handles, handle{mig}) - } - - return handles, nil -} - -func (h *handle) deviceGetDeviceHandleFromMigDeviceHandle() (handle, error) { - ret := dl.lookupSymbol("nvmlDeviceGetDeviceHandleFromMigDeviceHandle") - if ret != C.NVML_SUCCESS { - return handle{}, errorString(ret) - } - - var parent C.nvmlDevice_t - ret = C.nvmlDeviceGetDeviceHandleFromMigDeviceHandle(h.dev, &parent) - if ret != C.NVML_SUCCESS { - return handle{}, errorString(ret) - } - - return handle{parent}, nil -} diff --git a/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/mig.go b/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/mig.go deleted file mode 100644 index 446ef2e..0000000 --- a/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/mig.go +++ /dev/null @@ -1,423 +0,0 @@ -// Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. 
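// The handle-based helpers deleted above (deviceGetCount, deviceGetName,
// deviceGetUUID, deviceGetMinorNumber, ...) have their counterparts in the
// vendored go-nvml device wrappers (device.go, not shown in this hunk), which
// follow the same (value, Return) convention as unit.go and vgpu.go. A rough
// equivalent of the old enumeration path, written against go-nvml as an
// illustrative sketch with error handling abbreviated:
package main

import (
	"fmt"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
)

func main() {
	if ret := nvml.Init(); ret != nvml.SUCCESS {
		panic(nvml.ErrorString(ret))
	}
	defer nvml.Shutdown()

	count, _ := nvml.DeviceGetCount()
	for i := 0; i < count; i++ {
		dev, ret := nvml.DeviceGetHandleByIndex(i)
		if ret != nvml.SUCCESS {
			continue
		}
		name, _ := dev.GetName()         // was handle.deviceGetName()
		uuid, _ := dev.GetUUID()         // was handle.deviceGetUUID()
		minor, _ := dev.GetMinorNumber() // was handle.deviceGetMinorNumber()
		fmt.Printf("GPU %d: %s (%s) /dev/nvidia%d\n", i, name, uuid, minor)
	}
}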
- -package nvml - -import ( - "unsafe" -) - -// #include "nvml.h" -import "C" - -// Enable or disable MIG mode -const ( - DEVICE_MIG_DISABLE = C.NVML_DEVICE_MIG_DISABLE - DEVICE_MIG_ENABLE = C.NVML_DEVICE_MIG_ENABLE -) - -// GPU Instance Profiles -const ( - GPU_INSTANCE_PROFILE_1_SLICE = C.NVML_GPU_INSTANCE_PROFILE_1_SLICE - GPU_INSTANCE_PROFILE_2_SLICE = C.NVML_GPU_INSTANCE_PROFILE_2_SLICE - GPU_INSTANCE_PROFILE_3_SLICE = C.NVML_GPU_INSTANCE_PROFILE_3_SLICE - GPU_INSTANCE_PROFILE_4_SLICE = C.NVML_GPU_INSTANCE_PROFILE_4_SLICE - GPU_INSTANCE_PROFILE_7_SLICE = C.NVML_GPU_INSTANCE_PROFILE_7_SLICE - GPU_INSTANCE_PROFILE_COUNT = C.NVML_GPU_INSTANCE_PROFILE_COUNT -) - -// Compute Instance Profiles -const ( - COMPUTE_INSTANCE_PROFILE_1_SLICE = C.NVML_COMPUTE_INSTANCE_PROFILE_1_SLICE - COMPUTE_INSTANCE_PROFILE_2_SLICE = C.NVML_COMPUTE_INSTANCE_PROFILE_2_SLICE - COMPUTE_INSTANCE_PROFILE_3_SLICE = C.NVML_COMPUTE_INSTANCE_PROFILE_3_SLICE - COMPUTE_INSTANCE_PROFILE_4_SLICE = C.NVML_COMPUTE_INSTANCE_PROFILE_4_SLICE - COMPUTE_INSTANCE_PROFILE_7_SLICE = C.NVML_COMPUTE_INSTANCE_PROFILE_7_SLICE - COMPUTE_INSTANCE_PROFILE_COUNT = C.NVML_COMPUTE_INSTANCE_PROFILE_COUNT -) - -// Compute Instance Engine Profiles -const ( - COMPUTE_INSTANCE_ENGINE_PROFILE_SHARED = C.NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_SHARED - COMPUTE_INSTANCE_ENGINE_PROFILE_COUNT = C.NVML_COMPUTE_INSTANCE_ENGINE_PROFILE_COUNT -) - -// Opaque GPUInstance type -type GPUInstance struct { - handle C.nvmlGpuInstance_t - device *Device -} - -// type GPUInstancePlacement C.nvmlGpuInstancePlacement_t -// Generated using `go tool cgo -godefs mig.go` -type GPUInstancePlacement struct { - Start uint32 - Size uint32 -} - -// type GPUInstanceProfileInfo C.nvmlGpuInstanceProfileInfo_t -// Generated using `go tool cgo -godefs mig.go` -type GPUInstanceProfileInfo struct { - ID uint32 - IsP2pSupported uint32 - SliceCount uint32 - InstanceCount uint32 - MultiprocessorCount uint32 - CopyEngineCount uint32 - DecoderCount uint32 - EncoderCount uint32 - JpegCount uint32 - OfaCount uint32 - MemorySizeMB uint64 -} - -// type GPUInstanceInfo_t C.nvmlGpuInstanceInfo_t -// Generated using `go tool cgo -godefs mig.go` -type GPUInstanceInfo struct { - Device *Device - ID uint32 - ProfileID uint32 - Placement GPUInstancePlacement -} - -// Opaque ComputeInstance type -type ComputeInstance struct { - handle C.nvmlComputeInstance_t - gpuInstance GPUInstance -} - -// type ComputeInstanceProfileInfo C.nvmlComputeInstanceProfileInfo_t -// Generated using `go tool cgo -godefs mig.go` -type ComputeInstanceProfileInfo struct { - ID uint32 - SliceCount uint32 - InstanceCount uint32 - MultiprocessorCount uint32 - SharedCopyEngineCount uint32 - SharedDecoderCount uint32 - SharedEncoderCount uint32 - SharedJpegCount uint32 - SharedOfaCount uint32 -} - -// type ComputeInstanceInfo C.nvmlComputeInstanceInfo_t -// Generated using `go tool cgo -godefs mig.go` -type ComputeInstanceInfo struct { - Device *Device - GPUInstance GPUInstance - ID uint32 - ProfileID uint32 -} - -// type DeviceAttributes C.nvmlDeviceAttributes_t -// Generated using `go tool cgo -godefs mig.go` -type DeviceAttributes struct { - MultiprocessorCount uint32 - SharedCopyEngineCount uint32 - SharedDecoderCount uint32 - SharedEncoderCount uint32 - SharedJpegCount uint32 - SharedOfaCount uint32 - GpuInstanceSliceCount uint32 - ComputeInstanceSliceCount uint32 - MemorySizeMB uint64 -} - -// Device.SetMigMode() -func (d *Device) SetMigMode(mode int) (activationStatus error, err error) { - ret := 
dl.lookupSymbol("nvmlDeviceSetMigMode") - if ret != C.NVML_SUCCESS { - return nil, errorString(ret) - } - - var as C.nvmlReturn_t - ret = C.nvmlDeviceSetMigMode(d.handle.dev, C.uint(mode), &as) - return errorString(as), errorString(ret) -} - -// Device.GetMigMode() -func (d *Device) GetMigMode() (currentMode, pendingMode int, err error) { - ret := dl.lookupSymbol("nvmlDeviceGetMigMode") - if ret != C.NVML_SUCCESS { - return 0, 0, errorString(ret) - } - - var cm, pm C.uint - ret = C.nvmlDeviceGetMigMode(d.handle.dev, &cm, &pm) - return int(cm), int(pm), errorString(ret) -} - -// Device.GetGPUInstanceProfileInfo() -func (d *Device) GetGPUInstanceProfileInfo(profile int) (profileInfo GPUInstanceProfileInfo, err error) { - ret := dl.lookupSymbol("nvmlDeviceGetGpuInstanceProfileInfo") - if ret != C.NVML_SUCCESS { - return GPUInstanceProfileInfo{}, errorString(ret) - } - - var pi C.nvmlGpuInstanceProfileInfo_t - ret = C.nvmlDeviceGetGpuInstanceProfileInfo(d.handle.dev, C.uint(profile), &pi) - return *(*GPUInstanceProfileInfo)(unsafe.Pointer(&pi)), errorString(ret) -} - -// Device.GetGPUInstancePossiblePlacements() -func (d *Device) GetGPUInstancePossiblePlacements(profileInfo *GPUInstanceProfileInfo) (placement GPUInstancePlacement, count int, err error) { - ret := dl.lookupSymbol("nvmlDeviceGetGpuInstancePossiblePlacements") - if ret != C.NVML_SUCCESS { - return GPUInstancePlacement{}, 0, errorString(ret) - } - - var pi C.nvmlGpuInstancePlacement_t - var c C.uint - ret = C.nvmlDeviceGetGpuInstancePossiblePlacements(d.handle.dev, C.uint(profileInfo.ID), &pi, &c) - return *(*GPUInstancePlacement)(unsafe.Pointer(&pi)), int(c), errorString(ret) -} - -// Device.GPUInstanceRemainingCapacity() -func (d *Device) GPUInstanceRemainingCapacity(profileInfo *GPUInstanceProfileInfo) (count int, err error) { - ret := dl.lookupSymbol("nvmlDeviceGetGpuInstanceRemainingCapacity") - if ret != C.NVML_SUCCESS { - return 0, errorString(ret) - } - - var c C.uint - ret = C.nvmlDeviceGetGpuInstanceRemainingCapacity(d.handle.dev, C.uint(profileInfo.ID), &c) - return int(c), errorString(ret) -} - -// Device.CreateGPUInstance() -func (d *Device) CreateGPUInstance(profileInfo *GPUInstanceProfileInfo) (gpuInstance GPUInstance, err error) { - ret := dl.lookupSymbol("nvmlDeviceCreateGpuInstance") - if ret != C.NVML_SUCCESS { - return GPUInstance{}, errorString(ret) - } - - var gi C.nvmlGpuInstance_t - ret = C.nvmlDeviceCreateGpuInstance(d.handle.dev, C.uint(profileInfo.ID), &gi) - return GPUInstance{gi, d}, errorString(ret) -} - -// GPUInstance.Destroy() -func (g *GPUInstance) Destroy() (err error) { - ret := dl.lookupSymbol("nvmlGpuInstanceDestroy") - if ret != C.NVML_SUCCESS { - return errorString(ret) - } - - ret = C.nvmlGpuInstanceDestroy(g.handle) - return errorString(ret) -} - -// Device.GetGPUInstances() -func (d *Device) GetGPUInstances(profileInfo *GPUInstanceProfileInfo) (gpuInstances []GPUInstance, err error) { - ret := dl.lookupSymbol("nvmlDeviceGetGpuInstances") - if ret != C.NVML_SUCCESS { - return nil, errorString(ret) - } - - gis := make([]C.nvmlGpuInstance_t, profileInfo.InstanceCount) - var c C.uint - ret = C.nvmlDeviceGetGpuInstances(d.handle.dev, C.uint(profileInfo.ID), &gis[0], &c) - for i := 0; i < int(c); i++ { - gpuInstances = append(gpuInstances, GPUInstance{gis[i], d}) - } - return gpuInstances, errorString(ret) -} - -// Device.GetGPUInstanceByID() -func (d *Device) GetGPUInstanceByID(id int) (gpuInstance GPUInstance, err error) { - ret := dl.lookupSymbol("nvmlDeviceGetGpuInstanceById") - if ret != 
C.NVML_SUCCESS { - return GPUInstance{}, errorString(ret) - } - - var gi C.nvmlGpuInstance_t - ret = C.nvmlDeviceGetGpuInstanceById(d.handle.dev, C.uint(id), &gi) - return GPUInstance{gi, d}, errorString(ret) -} - -// GPUInstance.GetInfo() -func (g *GPUInstance) GetInfo() (info GPUInstanceInfo, err error) { - ret := dl.lookupSymbol("nvmlGpuInstanceGetInfo") - if ret != C.NVML_SUCCESS { - return GPUInstanceInfo{}, errorString(ret) - } - - var gii C.nvmlGpuInstanceInfo_t - ret = C.nvmlGpuInstanceGetInfo(g.handle, &gii) - info = *(*GPUInstanceInfo)(unsafe.Pointer(&gii)) - info.Device = g.device - return info, errorString(ret) -} - -// GPUInstance.GetComputeInstanceProfileInfo() -func (g *GPUInstance) GetComputeInstanceProfileInfo(profile int, engProfile int) (profileInfo ComputeInstanceProfileInfo, err error) { - ret := dl.lookupSymbol("nvmlGpuInstanceGetComputeInstanceProfileInfo") - if ret != C.NVML_SUCCESS { - return ComputeInstanceProfileInfo{}, errorString(ret) - } - - var pi C.nvmlComputeInstanceProfileInfo_t - ret = C.nvmlGpuInstanceGetComputeInstanceProfileInfo(g.handle, C.uint(profile), C.uint(engProfile), &pi) - return *(*ComputeInstanceProfileInfo)(unsafe.Pointer(&pi)), errorString(ret) -} - -// GPUInstance.ComputeInstanceRemainingCapacity() -func (g *GPUInstance) ComputeInstanceRemainingCapacity(profileInfo *GPUInstanceProfileInfo) (count int, err error) { - ret := dl.lookupSymbol("nvmlGpuInstanceGetComputeInstanceRemainingCapacity") - if ret != C.NVML_SUCCESS { - return 0, errorString(ret) - } - - var c C.uint - ret = C.nvmlGpuInstanceGetComputeInstanceRemainingCapacity(g.handle, C.uint(profileInfo.ID), &c) - return int(c), errorString(ret) -} - -// GPUInstance.CreateComputeInstance() -func (g *GPUInstance) CreateComputeInstance(profileInfo *ComputeInstanceProfileInfo) (computeInstance ComputeInstance, err error) { - ret := dl.lookupSymbol("nvmlGpuInstanceCreateComputeInstance") - if ret != C.NVML_SUCCESS { - return ComputeInstance{}, errorString(ret) - } - - var ci C.nvmlComputeInstance_t - ret = C.nvmlGpuInstanceCreateComputeInstance(g.handle, C.uint(profileInfo.ID), &ci) - return ComputeInstance{ci, *g}, errorString(ret) -} - -// ComputeInstance.Destroy() -func (c *ComputeInstance) Destroy() (err error) { - ret := dl.lookupSymbol("nvmlComputeInstanceDestroy") - if ret != C.NVML_SUCCESS { - return errorString(ret) - } - - ret = C.nvmlComputeInstanceDestroy(c.handle) - return errorString(ret) -} - -// GPUInstance.GetComputeInstances() -func (g *GPUInstance) GetComputeInstances(profileInfo *ComputeInstanceProfileInfo) (computeInstances []ComputeInstance, err error) { - ret := dl.lookupSymbol("nvmlGpuInstanceGetComputeInstances") - if ret != C.NVML_SUCCESS { - return nil, errorString(ret) - } - - cis := make([]C.nvmlComputeInstance_t, profileInfo.InstanceCount) - var c C.uint - ret = C.nvmlGpuInstanceGetComputeInstances(g.handle, C.uint(profileInfo.ID), &cis[0], &c) - for i := 0; i < int(c); i++ { - computeInstances = append(computeInstances, ComputeInstance{cis[i], *g}) - } - return computeInstances, errorString(ret) -} - -// GPUInstance.GetComputeInstanceByID() -func (g *GPUInstance) GetComputeInstanceByID(id int) (computeInstance ComputeInstance, err error) { - ret := dl.lookupSymbol("nvmlGpuInstanceGetComputeInstanceById") - if ret != C.NVML_SUCCESS { - return ComputeInstance{}, errorString(ret) - } - - var ci C.nvmlComputeInstance_t - ret = C.nvmlGpuInstanceGetComputeInstanceById(g.handle, C.uint(id), &ci) - return ComputeInstance{ci, *g}, errorString(ret) -} - -// 
ComputeInstance.GetInfo() -func (c *ComputeInstance) GetInfo() (info ComputeInstanceInfo, err error) { - ret := dl.lookupSymbol("nvmlComputeInstanceGetInfo") - if ret != C.NVML_SUCCESS { - return ComputeInstanceInfo{}, errorString(ret) - } - - var cii C.nvmlComputeInstanceInfo_t - ret = C.nvmlComputeInstanceGetInfo(c.handle, &cii) - info = *(*ComputeInstanceInfo)(unsafe.Pointer(&cii)) - info.Device = c.gpuInstance.device - info.GPUInstance = c.gpuInstance - return info, errorString(ret) -} - -// Device.IsMigDeviceHandle() -func (d *Device) IsMigDeviceHandle() (isMigDevice bool, err error) { - ret := dl.lookupSymbol("nvmlDeviceIsMigDeviceHandle") - if ret != C.NVML_SUCCESS { - return false, errorString(ret) - } - - var is C.uint - ret = C.nvmlDeviceIsMigDeviceHandle(d.handle.dev, &is) - return (is != 0), errorString(ret) -} - -// Device.GetGPUInstanceId() -func (d *Device) GetGPUInstanceId() (id int, err error) { - ret := dl.lookupSymbol("nvmlDeviceGetGpuInstanceId") - if ret != C.NVML_SUCCESS { - return 0, errorString(ret) - } - - var gi C.uint - ret = C.nvmlDeviceGetGpuInstanceId(d.handle.dev, &gi) - return int(gi), errorString(ret) -} - -// Device.GetComputeInstanceId() -func (d *Device) GetComputeInstanceId() (id int, err error) { - ret := dl.lookupSymbol("nvmlDeviceGetComputeInstanceId") - if ret != C.NVML_SUCCESS { - return 0, errorString(ret) - } - - var ci C.uint - ret = C.nvmlDeviceGetComputeInstanceId(d.handle.dev, &ci) - return int(ci), errorString(ret) -} - -// Device.GetMaxMigDeviceCount() -func (d *Device) GetMaxMigDeviceCount() (count int, err error) { - ret := dl.lookupSymbol("nvmlDeviceGetMaxMigDeviceCount") - if ret != C.NVML_SUCCESS { - return 0, errorString(ret) - } - - var c C.uint - ret = C.nvmlDeviceGetMaxMigDeviceCount(d.handle.dev, &c) - return int(c), errorString(ret) -} - -// Device.GetMigDeviceHandleByIndex() -func (d *Device) GetMigDeviceHandleByIndex(index int) (migDevice *Device, err error) { - ret := dl.lookupSymbol("nvmlDeviceGetMigDeviceHandleByIndex") - if ret != C.NVML_SUCCESS { - return nil, errorString(ret) - } - - var m C.nvmlDevice_t - ret = C.nvmlDeviceGetMigDeviceHandleByIndex(d.handle.dev, C.uint(index), &m) - return &Device{handle: handle{m}}, errorString(ret) -} - -// Device.GetMigDeviceHandleByIndex() -func (d *Device) GetDeviceHandleFromMigDeviceHandle() (device *Device, err error) { - ret := dl.lookupSymbol("nvmlDeviceGetDeviceHandleFromMigDeviceHandle") - if ret != C.NVML_SUCCESS { - return nil, errorString(ret) - } - - var parent C.nvmlDevice_t - ret = C.nvmlDeviceGetDeviceHandleFromMigDeviceHandle(d.handle.dev, &parent) - return &Device{handle: handle{parent}}, errorString(ret) -} - -// Device.GetAttributes() -func (d *Device) GetAttributes() (attr DeviceAttributes, err error) { - ret := dl.lookupSymbol("nvmlDeviceGetAttributes") - if ret != C.NVML_SUCCESS { - return DeviceAttributes{}, errorString(ret) - } - - var a C.nvmlDeviceAttributes_t - ret = C.nvmlDeviceGetAttributes(d.handle.dev, &a) - return *(*DeviceAttributes)(unsafe.Pointer(&a)), errorString(ret) -} diff --git a/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml.go b/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml.go deleted file mode 100644 index c7ab5a1..0000000 --- a/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml.go +++ /dev/null @@ -1,822 +0,0 @@ -// Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved. 
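// mig.go's symbol-probing MIG helpers (dl.lookupSymbol plus Device.GetMigMode,
// GetMaxMigDeviceCount, GetMigDeviceHandleByIndex, ...) are likewise covered by
// the vendored go-nvml device wrappers. A rough equivalent for walking the MIG
// devices of GPU 0 with the new API (illustrative sketch; assumes MIG mode has
// already been enabled, and that the method and constant names come from
// go-nvml's device.go and const.go, which are not shown in this hunk):
package main

import (
	"fmt"

	"github.com/NVIDIA/go-nvml/pkg/nvml"
)

func main() {
	if ret := nvml.Init(); ret != nvml.SUCCESS {
		panic(nvml.ErrorString(ret))
	}
	defer nvml.Shutdown()

	parent, _ := nvml.DeviceGetHandleByIndex(0)
	current, _, ret := parent.GetMigMode()
	if ret != nvml.SUCCESS || current != int(nvml.DEVICE_MIG_ENABLE) {
		fmt.Println("MIG is not enabled on GPU 0")
		return
	}

	maxCount, _ := parent.GetMaxMigDeviceCount()
	for i := 0; i < maxCount; i++ {
		mig, ret := parent.GetMigDeviceHandleByIndex(i)
		if ret != nvml.SUCCESS {
			continue // unoccupied MIG slot
		}
		uuid, _ := mig.GetUUID()
		fmt.Println("MIG device:", uuid)
	}
}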
-
-package nvml
-
-// #include "nvml.h"
-import "C"
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"runtime"
-	"strconv"
-	"strings"
-)
-
-var (
-	ErrCPUAffinity        = errors.New("failed to retrieve CPU affinity")
-	ErrUnsupportedP2PLink = errors.New("unsupported P2P link type")
-	ErrUnsupportedGPU     = errors.New("unsupported GPU device")
-)
-
-type ModeState uint
-
-const (
-	Disabled ModeState = iota
-	Enabled
-)
-
-func (m ModeState) String() string {
-	switch m {
-	case Enabled:
-		return "Enabled"
-	case Disabled:
-		return "Disabled"
-	}
-	return "N/A"
-}
-
-type Display struct {
-	Mode   ModeState
-	Active ModeState
-}
-
-type Accounting struct {
-	Mode       ModeState
-	BufferSize *uint
-}
-
-type DeviceMode struct {
-	DisplayInfo    Display
-	Persistence    ModeState
-	AccountingInfo Accounting
-}
-
-type ThrottleReason uint
-
-const (
-	ThrottleReasonGpuIdle ThrottleReason = iota
-	ThrottleReasonApplicationsClocksSetting
-	ThrottleReasonSwPowerCap
-	ThrottleReasonHwSlowdown
-	ThrottleReasonSyncBoost
-	ThrottleReasonSwThermalSlowdown
-	ThrottleReasonHwThermalSlowdown
-	ThrottleReasonHwPowerBrakeSlowdown
-	ThrottleReasonDisplayClockSetting
-	ThrottleReasonNone
-	ThrottleReasonUnknown
-)
-
-func (r ThrottleReason) String() string {
-	switch r {
-	case ThrottleReasonGpuIdle:
-		return "Gpu Idle"
-	case ThrottleReasonApplicationsClocksSetting:
-		return "Applications Clocks Setting"
-	case ThrottleReasonSwPowerCap:
-		return "SW Power Cap"
-	case ThrottleReasonHwSlowdown:
-		return "HW Slowdown"
-	case ThrottleReasonSyncBoost:
-		return "Sync Boost"
-	case ThrottleReasonSwThermalSlowdown:
-		return "SW Thermal Slowdown"
-	case ThrottleReasonHwThermalSlowdown:
-		return "HW Thermal Slowdown"
-	case ThrottleReasonHwPowerBrakeSlowdown:
-		return "HW Power Brake Slowdown"
-	case ThrottleReasonDisplayClockSetting:
-		return "Display Clock Setting"
-	case ThrottleReasonNone:
-		return "No clocks throttling"
-	}
-	return "N/A"
-}
-
-type PerfState uint
-
-const (
-	PerfStateMax     = 0
-	PerfStateMin     = 15
-	PerfStateUnknown = 32
-)
-
-func (p PerfState) String() string {
-	if p >= PerfStateMax && p <= PerfStateMin {
-		return fmt.Sprintf("P%d", p)
-	}
-	return "Unknown"
-}
-
-type ProcessType uint
-
-const (
-	Compute ProcessType = iota
-	Graphics
-	ComputeAndGraphics
-)
-
-func (t ProcessType) String() string {
-	typ := "C+G"
-	if t == Compute {
-		typ = "C"
-	} else if t == Graphics {
-		typ = "G"
-	}
-	return typ
-}
-
-type P2PLinkType uint
-
-const (
-	P2PLinkUnknown P2PLinkType = iota
-	P2PLinkCrossCPU
-	P2PLinkSameCPU
-	P2PLinkHostBridge
-	P2PLinkMultiSwitch
-	P2PLinkSingleSwitch
-	P2PLinkSameBoard
-	SingleNVLINKLink
-	TwoNVLINKLinks
-	ThreeNVLINKLinks
-	FourNVLINKLinks
-	FiveNVLINKLinks
-	SixNVLINKLinks
-	SevenNVLINKLinks
-	EightNVLINKLinks
-	NineNVLINKLinks
-	TenNVLINKLinks
-	ElevenNVLINKLinks
-	TwelveNVLINKLinks
-)
-
-type P2PLink struct {
-	BusID string
-	Link  P2PLinkType
-}
-
-func (t P2PLinkType) String() string {
-	switch t {
-	case P2PLinkCrossCPU:
-		return "Cross CPU socket"
-	case P2PLinkSameCPU:
-		return "Same CPU socket"
-	case P2PLinkHostBridge:
-		return "Host PCI bridge"
-	case P2PLinkMultiSwitch:
-		return "Multiple PCI switches"
-	case P2PLinkSingleSwitch:
-		return "Single PCI switch"
-	case P2PLinkSameBoard:
-		return "Same board"
-	case SingleNVLINKLink:
-		return "Single NVLink"
-	case TwoNVLINKLinks:
-		return "Two NVLinks"
-	case ThreeNVLINKLinks:
-		return "Three NVLinks"
-	case FourNVLINKLinks:
-		return "Four NVLinks"
-	case FiveNVLINKLinks:
-		return "Five NVLinks"
-	case SixNVLINKLinks:
-		return "Six NVLinks"
-	case SevenNVLINKLinks:
-		return "Seven NVLinks"
-	case EightNVLINKLinks:
-		return "Eight NVLinks"
-	case NineNVLINKLinks:
-		return "Nine NVLinks"
-	case TenNVLINKLinks:
-		return "Ten NVLinks"
-	case ElevenNVLINKLinks:
-		return "Eleven NVLinks"
-	case TwelveNVLINKLinks:
-		return "Twelve NVLinks"
-	case P2PLinkUnknown:
-	}
-	return "N/A"
-}
-
-type ClockInfo struct {
-	Cores  *uint
-	Memory *uint
-}
-
-type PCIInfo struct {
-	BusID     string
-	BAR1      *uint64
-	Bandwidth *uint
-}
-
-type CudaComputeCapabilityInfo struct {
-	Major *int
-	Minor *int
-}
-
-type Device struct {
-	handle
-
-	UUID                  string
-	Path                  string
-	Model                 *string
-	Power                 *uint
-	Memory                *uint64
-	CPUAffinity           *uint
-	PCI                   PCIInfo
-	Clocks                ClockInfo
-	Topology              []P2PLink
-	CudaComputeCapability CudaComputeCapabilityInfo
-}
-
-type UtilizationInfo struct {
-	GPU     *uint
-	Memory  *uint
-	Encoder *uint
-	Decoder *uint
-}
-
-type PCIThroughputInfo struct {
-	RX *uint
-	TX *uint
-}
-
-type PCIStatusInfo struct {
-	BAR1Used   *uint64
-	Throughput PCIThroughputInfo
-}
-
-type ECCErrorsInfo struct {
-	L1Cache *uint64
-	L2Cache *uint64
-	Device  *uint64
-}
-
-type DeviceMemory struct {
-	Used *uint64
-	Free *uint64
-}
-
-type MemoryInfo struct {
-	Global    DeviceMemory
-	ECCErrors ECCErrorsInfo
-}
-
-type ProcessInfo struct {
-	PID        uint
-	Name       string
-	MemoryUsed uint64
-	Type       ProcessType
-}
-
-type DeviceStatus struct {
-	Power       *uint
-	FanSpeed    *uint
-	Temperature *uint
-	Utilization UtilizationInfo
-	Memory      MemoryInfo
-	Clocks      ClockInfo
-	PCI         PCIStatusInfo
-	Processes   []ProcessInfo
-	Throttle    ThrottleReason
-	Performance PerfState
-}
-
-func assert(err error) {
-	if err != nil {
-		panic(err)
-	}
-}
-
-func Init() error {
-	return init_()
-}
-
-func Shutdown() error {
-	return shutdown()
-}
-
-func GetDeviceCount() (uint, error) {
-	return deviceGetCount()
-}
-
-func GetDriverVersion() (string, error) {
-	return systemGetDriverVersion()
-}
-
-func GetCudaDriverVersion() (*uint, *uint, error) {
-	return systemGetCudaDriverVersion()
-}
-
-func numaNode(busid string) (*uint, error) {
-	// discard leading zeros of busid
-	b, err := ioutil.ReadFile(fmt.Sprintf("/sys/bus/pci/devices/%s/numa_node", strings.ToLower(busid[4:])))
-	if err != nil {
-		// XXX report nil if NUMA support isn't enabled
-		return nil, nil
-	}
-	node, err := strconv.ParseInt(string(bytes.TrimSpace(b)), 10, 8)
-	if err != nil {
-		return nil, fmt.Errorf("%v: %v", ErrCPUAffinity, err)
-	}
-	if node < 0 {
-		// XXX report nil instead of NUMA_NO_NODE
-		return nil, nil
-	}
-
-	numaNode := uint(node)
-	return &numaNode, nil
-}
-
-func pciBandwidth(gen, width *uint) *uint {
-	m := map[uint]uint{
-		1: 250, // MB/s
-		2: 500,
-		3: 985,
-		4: 1969,
-	}
-	if gen == nil || width == nil {
-		return nil
-	}
-	bw := m[*gen] * *width
-	return &bw
-}
-
-func NewDevice(idx uint) (device *Device, err error) {
-	defer func() {
-		if r := recover(); r != nil {
-			err = r.(error)
-		}
-	}()
-
-	h, err := deviceGetHandleByIndex(idx)
-	assert(err)
-
-	device, err = newDevice(h)
-	assert(err)
-
-	return device, err
-}
-
-func NewDeviceByUUID(uuid string) (device *Device, err error) {
-	defer func() {
-		if r := recover(); r != nil {
-			err = r.(error)
-		}
-	}()
-
-	h, err := deviceGetHandleByUUID(uuid)
-	assert(err)
-
-	device, err = newDevice(h)
-	assert(err)
-
-	return device, err
-}
-
-func newDevice(h handle) (device *Device, err error) {
-	model, err := h.deviceGetName()
-	assert(err)
-	uuid, err := h.deviceGetUUID()
-	assert(err)
-	minor, err := h.deviceGetMinorNumber()
-	assert(err)
-	power, err := h.deviceGetPowerManagementLimit()
-	assert(err)
-	totalMem, _, err := h.deviceGetMemoryInfo()
-	assert(err)
-	busid, err := h.deviceGetPciInfo()
-	assert(err)
-	bar1, _, err := h.deviceGetBAR1MemoryInfo()
-	assert(err)
-	pcig, err := h.deviceGetMaxPcieLinkGeneration()
-	assert(err)
-	pciw, err := h.deviceGetMaxPcieLinkWidth()
-	assert(err)
-	ccore, cmem, err := h.deviceGetMaxClockInfo()
-	assert(err)
-	cccMajor, cccMinor, err := h.deviceGetCudaComputeCapability()
-	assert(err)
-
-	var path string
-	if runtime.GOOS == "windows" {
-		if busid == nil || uuid == nil {
-			return nil, ErrUnsupportedGPU
-		}
-	} else {
-		if minor == nil || busid == nil || uuid == nil {
-			return nil, ErrUnsupportedGPU
-		}
-		path = fmt.Sprintf("/dev/nvidia%d", *minor)
-	}
-	node, err := numaNode(*busid)
-	assert(err)
-
-	device = &Device{
-		handle:      h,
-		UUID:        *uuid,
-		Path:        path,
-		Model:       model,
-		Power:       power,
-		Memory:      totalMem,
-		CPUAffinity: node,
-		PCI: PCIInfo{
-			BusID:     *busid,
-			BAR1:      bar1,
-			Bandwidth: pciBandwidth(pcig, pciw), // MB/s
-		},
-		Clocks: ClockInfo{
-			Cores:  ccore, // MHz
-			Memory: cmem,  // MHz
-		},
-		CudaComputeCapability: CudaComputeCapabilityInfo{
-			Major: cccMajor,
-			Minor: cccMinor,
-		},
-	}
-	if power != nil {
-		*device.Power /= 1000 // W
-	}
-	if bar1 != nil {
-		*device.PCI.BAR1 /= 1024 * 1024 // MiB
-	}
-	return
-}
-
-func NewDeviceLite(idx uint) (device *Device, err error) {
-	defer func() {
-		if r := recover(); r != nil {
-			err = r.(error)
-		}
-	}()
-
-	h, err := deviceGetHandleByIndex(idx)
-	assert(err)
-
-	device, err = newDeviceLite(h)
-	assert(err)
-
-	return device, err
-}
-
-func NewDeviceLiteByUUID(uuid string) (device *Device, err error) {
-	defer func() {
-		if r := recover(); r != nil {
-			err = r.(error)
-		}
-	}()
-
-	h, err := deviceGetHandleByUUID(uuid)
-	assert(err)
-
-	device, err = newDeviceLite(h)
-	assert(err)
-
-	return device, err
-}
-
-func newDeviceLite(h handle) (device *Device, err error) {
-	uuid, err := h.deviceGetUUID()
-	assert(err)
-	minor, err := h.deviceGetMinorNumber()
-	assert(err)
-	busid, err := h.deviceGetPciInfo()
-	assert(err)
-
-	if minor == nil || busid == nil || uuid == nil {
-		return nil, ErrUnsupportedGPU
-	}
-	path := fmt.Sprintf("/dev/nvidia%d", *minor)
-	node, err := numaNode(*busid)
-	assert(err)
-
-	device = &Device{
-		handle:      h,
-		UUID:        *uuid,
-		Path:        path,
-		CPUAffinity: node,
-		PCI: PCIInfo{
-			BusID: *busid,
-		},
-	}
-	return
-}
-
-func (d *Device) Status() (status *DeviceStatus, err error) {
-	defer func() {
-		if r := recover(); r != nil {
-			err = r.(error)
-		}
-	}()
-
-	power, err := d.deviceGetPowerUsage()
-	assert(err)
-	fanSpeed, err := d.deviceGetFanSpeed()
-	assert(err)
-	temp, err := d.deviceGetTemperature()
-	assert(err)
-	ugpu, umem, err := d.deviceGetUtilizationRates()
-	assert(err)
-	uenc, err := d.deviceGetEncoderUtilization()
-	assert(err)
-	udec, err := d.deviceGetDecoderUtilization()
-	assert(err)
-	_, devMem, err := d.deviceGetMemoryInfo()
-	assert(err)
-	ccore, cmem, err := d.deviceGetClockInfo()
-	assert(err)
-	_, bar1, err := d.deviceGetBAR1MemoryInfo()
-	assert(err)
-	el1, el2, emem, err := d.deviceGetMemoryErrorCounter()
-	assert(err)
-	pcirx, pcitx, err := d.deviceGetPcieThroughput()
-	assert(err)
-	throttle, err := d.getClocksThrottleReasons()
-	assert(err)
-	perfState, err := d.getPerformanceState()
-	assert(err)
-	processInfo, err := d.deviceGetAllRunningProcesses()
-	assert(err)
-
-	status = &DeviceStatus{
-		Power:       power,
-		FanSpeed:    fanSpeed, // %
-		Temperature: temp,     // °C
-		Utilization: UtilizationInfo{
-			GPU:     ugpu, // %
-			Memory:  umem, // %
-			Encoder: uenc, // %
-			Decoder: udec, // %
-		},
-		Memory: MemoryInfo{
-			Global: devMem,
-			ECCErrors: ECCErrorsInfo{
-				L1Cache: el1,
-				L2Cache: el2,
-				Device:  emem,
-			},
-		},
-		Clocks: ClockInfo{
-			Cores:  ccore, // MHz
-			Memory: cmem,  // MHz
-		},
-		PCI: PCIStatusInfo{
-			BAR1Used: bar1,
-			Throughput: PCIThroughputInfo{
-				RX: pcirx,
-				TX: pcitx,
-			},
-		},
-		Throttle:    throttle,
-		Performance: perfState,
-		Processes:   processInfo,
-	}
-	if power != nil {
-		*status.Power /= 1000 // W
-	}
-	if bar1 != nil {
-		*status.PCI.BAR1Used /= 1024 * 1024 // MiB
-	}
-	if pcirx != nil {
-		*status.PCI.Throughput.RX /= 1000 // MB/s
-	}
-	if pcitx != nil {
-		*status.PCI.Throughput.TX /= 1000 // MB/s
-	}
-	return
-}
-
-func GetP2PLink(dev1, dev2 *Device) (link P2PLinkType, err error) {
-	level, err := deviceGetTopologyCommonAncestor(dev1.handle, dev2.handle)
-	if err != nil || level == nil {
-		return P2PLinkUnknown, err
-	}
-
-	switch *level {
-	case C.NVML_TOPOLOGY_INTERNAL:
-		link = P2PLinkSameBoard
-	case C.NVML_TOPOLOGY_SINGLE:
-		link = P2PLinkSingleSwitch
-	case C.NVML_TOPOLOGY_MULTIPLE:
-		link = P2PLinkMultiSwitch
-	case C.NVML_TOPOLOGY_HOSTBRIDGE:
-		link = P2PLinkHostBridge
-	case C.NVML_TOPOLOGY_CPU:
-		link = P2PLinkSameCPU
-	case C.NVML_TOPOLOGY_SYSTEM:
-		link = P2PLinkCrossCPU
-	default:
-		err = ErrUnsupportedP2PLink
-	}
-	return
-}
-
-func GetNVLink(dev1, dev2 *Device) (link P2PLinkType, err error) {
-	nvbusIds1, err := dev1.handle.deviceGetAllNvLinkRemotePciInfo()
-	if err != nil || nvbusIds1 == nil {
-		return P2PLinkUnknown, err
-	}
-
-	nvlink := P2PLinkUnknown
-	for _, nvbusID1 := range nvbusIds1 {
-		if *nvbusID1 == dev2.PCI.BusID {
-			switch nvlink {
-			case P2PLinkUnknown:
-				nvlink = SingleNVLINKLink
-			case SingleNVLINKLink:
-				nvlink = TwoNVLINKLinks
-			case TwoNVLINKLinks:
-				nvlink = ThreeNVLINKLinks
-			case ThreeNVLINKLinks:
-				nvlink = FourNVLINKLinks
-			case FourNVLINKLinks:
-				nvlink = FiveNVLINKLinks
-			case FiveNVLINKLinks:
-				nvlink = SixNVLINKLinks
-			case SixNVLINKLinks:
-				nvlink = SevenNVLINKLinks
-			case SevenNVLINKLinks:
-				nvlink = EightNVLINKLinks
-			case EightNVLINKLinks:
-				nvlink = NineNVLINKLinks
-			case NineNVLINKLinks:
-				nvlink = TenNVLINKLinks
-			case TenNVLINKLinks:
-				nvlink = ElevenNVLINKLinks
-			case ElevenNVLINKLinks:
-				nvlink = TwelveNVLINKLinks
-			}
-		}
-	}
-
-	// TODO(klueska): Handle NVSwitch semantics
-
-	return nvlink, nil
-}
-
-func (d *Device) GetComputeRunningProcesses() ([]uint, []uint64, error) {
-	return d.handle.deviceGetComputeRunningProcesses()
-}
-
-func (d *Device) GetGraphicsRunningProcesses() ([]uint, []uint64, error) {
-	return d.handle.deviceGetGraphicsRunningProcesses()
-}
-
-func (d *Device) GetAllRunningProcesses() ([]ProcessInfo, error) {
-	return d.handle.deviceGetAllRunningProcesses()
-}
-
-func (d *Device) GetDeviceMode() (mode *DeviceMode, err error) {
-	defer func() {
-		if r := recover(); r != nil {
-			err = r.(error)
-		}
-	}()
-
-	display, err := d.getDisplayInfo()
-	assert(err)
-
-	p, err := d.getPeristenceMode()
-	assert(err)
-
-	accounting, err := d.getAccountingInfo()
-	assert(err)
-
-	mode = &DeviceMode{
-		DisplayInfo:    display,
-		Persistence:    p,
-		AccountingInfo: accounting,
-	}
-	return
-}
-
-func (d *Device) IsMigEnabled() (bool, error) {
-	return d.handle.isMigEnabled()
-}
-
-func (d *Device) GetMigDevices() ([]*Device, error) {
-	handles, err := d.handle.getMigDevices()
-	if err != nil {
-		return nil, err
-	}
-
-	var devices []*Device
-	for _, h := range handles {
-		uuid, err := h.deviceGetUUID()
-		if err != nil {
-			return nil, err
-		}
-
-		model, err := d.deviceGetName()
-		if err != nil {
-			return nil, err
-		}
-
-		totalMem, _, err := h.deviceGetMemoryInfo()
-		if err != nil {
-			return nil, err
-		}
-
-		device := &Device{
-			handle:      h,
-			UUID:        *uuid,
-			Model:       model,
-			Memory:      totalMem,
-			CPUAffinity: d.CPUAffinity,
-			Path:        d.Path,
-		}
-
-		devices = append(devices, device)
-	}
-
-	return devices, nil
-}
-
-func (d *Device) GetMigParentDevice() (*Device, error) {
-	parent, err := d.handle.deviceGetDeviceHandleFromMigDeviceHandle()
-	if err != nil {
-		return nil, err
-	}
-
-	index, err := parent.deviceGetIndex()
-	if err != nil {
-		return nil, err
-	}
-
-	return NewDevice(*index)
-}
-
-func (d *Device) GetMigParentDeviceLite() (*Device, error) {
-	parent, err := d.handle.deviceGetDeviceHandleFromMigDeviceHandle()
-	if err != nil {
-		return nil, err
-	}
-
-	index, err := parent.deviceGetIndex()
-	if err != nil {
-		return nil, err
-	}
-
-	return NewDeviceLite(*index)
-}
-
-func ParseMigDeviceUUID(uuid string) (string, uint, uint, error) {
-	migHandle, err := deviceGetHandleByUUID(uuid)
-	if err == nil {
-		return getMIGDeviceInfo(migHandle)
-	}
-	return parseMigDeviceUUID(uuid)
-}
-
-func getMIGDeviceInfo(migHandle handle) (string, uint, uint, error) {
-	parentHandle, err := migHandle.deviceGetDeviceHandleFromMigDeviceHandle()
-	if err != nil {
-		return "", 0, 0, err
-	}
-
-	parentUUID, err := parentHandle.deviceGetUUID()
-	if err != nil {
-		return "", 0, 0, err
-	}
-
-	migDevice := Device{handle: migHandle}
-
-	gi, err := migDevice.GetGPUInstanceId()
-	if err != nil {
-		return "", 0, 0, err
-	}
-
-	ci, err := migDevice.GetComputeInstanceId()
-	if err != nil {
-		return "", 0, 0, err
-	}
-
-	return *parentUUID, uint(gi), uint(ci), err
-}
-
-func parseMigDeviceUUID(mig string) (string, uint, uint, error) {
-	tokens := strings.SplitN(mig, "-", 2)
-	if len(tokens) != 2 || tokens[0] != "MIG" {
-		return "", 0, 0, fmt.Errorf("Unable to parse UUID as MIG device")
-	}
-
-	tokens = strings.SplitN(tokens[1], "/", 3)
-	if len(tokens) != 3 || !strings.HasPrefix(tokens[0], "GPU-") {
-		return "", 0, 0, fmt.Errorf("Unable to parse UUID as MIG device")
-	}
-
-	gi, err := strconv.Atoi(tokens[1])
-	if err != nil {
-		return "", 0, 0, fmt.Errorf("Unable to parse UUID as MIG device")
-	}
-
-	ci, err := strconv.Atoi(tokens[2])
-	if err != nil {
-		return "", 0, 0, fmt.Errorf("Unable to parse UUID as MIG device")
-	}
-
-	return tokens[0], uint(gi), uint(ci), nil
-}
diff --git a/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml_dl.go b/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml_dl.go
deleted file mode 100644
index 21da6dd..0000000
--- a/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml_dl.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
-
-// +build linux darwin
-
-package nvml
-
-import (
-	"unsafe"
-)
-
-/*
-#include <dlfcn.h>
-#include "nvml.h"
-
-// We wrap the call to nvmlInit() here to ensure that we pick up the correct
-// version of this call. The macro magic in nvml.h that #defines the symbol
-// 'nvmlInit' to 'nvmlInit_v2' is unfortunately lost on cgo.
-static nvmlReturn_t nvmlInit_dl(void) {
-	return nvmlInit();
-}
-*/
-import "C"
-
-type dlhandles struct{ handles []unsafe.Pointer }
-
-var dl dlhandles
-
-// Initialize NVML, opening a dynamic reference to the NVML library in the process.
-func (dl *dlhandles) nvmlInit() C.nvmlReturn_t {
-	handle := C.dlopen(C.CString("libnvidia-ml.so.1"), C.RTLD_LAZY|C.RTLD_GLOBAL)
-	if handle == C.NULL {
-		return C.NVML_ERROR_LIBRARY_NOT_FOUND
-	}
-	dl.handles = append(dl.handles, handle)
-	return C.nvmlInit_dl()
-}
-
-// Shutdown NVML, closing our dynamic reference to the NVML library in the process.
-func (dl *dlhandles) nvmlShutdown() C.nvmlReturn_t {
-	ret := C.nvmlShutdown()
-	if ret != C.NVML_SUCCESS {
-		return ret
-	}
-
-	for _, handle := range dl.handles {
-		err := C.dlclose(handle)
-		if err != 0 {
-			return C.NVML_ERROR_UNKNOWN
-		}
-	}
-
-	return C.NVML_SUCCESS
-}
-
-// Check to see if a specific symbol is present in the NVML library.
-func (dl *dlhandles) lookupSymbol(symbol string) C.nvmlReturn_t {
-	for _, handle := range dl.handles {
-		C.dlerror()
-		C.dlsym(handle, C.CString(symbol))
-		if unsafe.Pointer(C.dlerror()) == C.NULL {
-			return C.NVML_SUCCESS
-		}
-	}
-	return C.NVML_ERROR_FUNCTION_NOT_FOUND
-}
diff --git a/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml_dl_windows.go b/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml_dl_windows.go
deleted file mode 100644
index 4b94170..0000000
--- a/vendor/github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml/nvml_dl_windows.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright (c) 2015-2018, NVIDIA CORPORATION. All rights reserved.
-
-// +build windows
-
-package nvml
-
-import (
-	"syscall"
-)
-
-/*
-#include "nvml.h"
-
-// We wrap the call to nvmlInit() here to ensure that we pick up the correct
-// version of this call. The macro magic in nvml.h that #defines the symbol
-// 'nvmlInit' to 'nvmlInit_v2' is unfortunately lost on cgo.
-static nvmlReturn_t nvmlInit_dl(void) {
-	return nvmlInit();
-}
-*/
-import "C"
-
-type dlhandles struct{ handles []*syscall.LazyDLL }
-
-var dl dlhandles
-
-// Initialize NVML, opening a dynamic reference to the NVML library in the process.
-func (dl *dlhandles) nvmlInit() C.nvmlReturn_t {
-	handle := syscall.NewLazyDLL("nvml.dll")
-	if handle == nil {
-		return C.NVML_ERROR_LIBRARY_NOT_FOUND
-	}
-	dl.handles = append(dl.handles, handle)
-	return C.nvmlInit_dl()
-}
-
-// Shutdown NVML, closing our dynamic reference to the NVML library in the process.
-func (dl *dlhandles) nvmlShutdown() C.nvmlReturn_t {
-	ret := C.nvmlShutdown()
-	if ret != C.NVML_SUCCESS {
-		return ret
-	}
-
-	dl.handles = dl.handles[:0]
-
-	return C.NVML_SUCCESS
-}
-
-// Check to see if a specific symbol is present in the NVML library.
-func (dl *dlhandles) lookupSymbol(symbol string) C.nvmlReturn_t {
-	for _, handle := range dl.handles {
-		if proc := handle.NewProc(symbol); proc != nil {
-			return C.NVML_SUCCESS
-		}
-	}
-	return C.NVML_ERROR_FUNCTION_NOT_FOUND
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 501a220..036963f 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,6 +1,7 @@
-# github.com/NVIDIA/gpu-monitoring-tools v0.0.0-20201109160820-d08ea3cdcce4
-## explicit
-github.com/NVIDIA/gpu-monitoring-tools/bindings/go/nvml
+# github.com/NVIDIA/go-nvml v0.12.0-1
+## explicit; go 1.15
+github.com/NVIDIA/go-nvml/pkg/dl
+github.com/NVIDIA/go-nvml/pkg/nvml
 # k8s.io/api => k8s.io/api v0.18.2
 # k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.18.2
 # k8s.io/apimachinery => k8s.io/apimachinery v0.18.2